diff --git a/.ci-operator.yaml b/.ci-operator.yaml
new file mode 100644
index 0000000000000..7c15f83e3e6b4
--- /dev/null
+++ b/.ci-operator.yaml
@@ -0,0 +1,4 @@
+build_root_image:
+  name: release
+  namespace: openshift
+  tag: rhel-9-release-golang-1.23-openshift-4.19
diff --git a/.gitignore b/.gitignore
index c1915d7a1544f..fc737de69ed13 100644
--- a/.gitignore
+++ b/.gitignore
@@ -124,3 +124,10 @@ zz_generated_*_test.go
 
 # generated by verify-vendor.sh
 vendordiff.patch
+
+# Ignore openshift source archives produced as part of rpm build
+openshift*.tar.gz
+
+# Ensure that openapi definitions are not ignored to ensure that
+# openshift/origin can vendor them.
+!pkg/generated/openapi/zz_generated.openapi.go
diff --git a/.openshift-tests-extension/openshift_payload_hyperkube.json b/.openshift-tests-extension/openshift_payload_hyperkube.json
new file mode 100644
index 0000000000000..23e86487fa5f1
--- /dev/null
+++ b/.openshift-tests-extension/openshift_payload_hyperkube.json
@@ -0,0 +1,80079 @@
+[
+  {
+    "name": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should sign the new added bootstrap tokens [Disabled:Unimplemented] [Suite:k8s]",
+    "labels": {
+      "Feature:BootstrapTokens": {},
+      "sig-cluster-lifecycle": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should resign the bootstrap tokens when the clusterInfo ConfigMap updated [Serial] [Disruptive] [Disabled:Unimplemented] [Suite:k8s]",
+    "labels": {
+      "Disruptive": {},
+      "Feature:BootstrapTokens": {},
+      "Serial": {},
+      "sig-cluster-lifecycle": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should delete the signed bootstrap tokens from clusterInfo ConfigMap when bootstrap token is deleted [Disabled:Unimplemented] [Suite:k8s]",
+    "labels": {
+      "Feature:BootstrapTokens": {},
+      "sig-cluster-lifecycle": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should delete the token secret when the secret expired [Disabled:Unimplemented] [Suite:k8s]",
+    "labels": {
+      "Feature:BootstrapTokens": {},
+      "sig-cluster-lifecycle": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should not delete the token secret when the secret is not expired [Disabled:Unimplemented] [Suite:k8s]",
+    "labels": {
+      "Feature:BootstrapTokens": {},
+      "sig-cluster-lifecycle": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-architecture] Conformance Tests should have at least two untainted nodes [Conformance] [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-architecture": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] Certificates API [Privileged:ClusterAdmin] should support building a client with a CSR [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] Certificates API [Privileged:ClusterAdmin] should support CSR API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthenticator] The kubelet's main port 10250 should reject requests with no credentials [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthenticator": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthenticator] The kubelet can delegate ServiceAccount tokens to the API server [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthenticator": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] Getting a non-existent secret should exit with the Forbidden error, not a NotFound error [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] Getting an existing secret should exit with the Forbidden error [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] Getting an existing configmap should exit with the Forbidden error [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] Getting a secret for a workload the node has access to should succeed [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] A node shouldn't be able to create another node [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] A node shouldn't be able to delete another node [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:ClusterTrustBundle] [Feature:ClusterTrustBundleProjection] should be able to mount a single ClusterTrustBundle by name [Disabled:Alpha] [Suite:k8s]",
+    "labels": {
+      "Feature:ClusterTrustBundle": {},
+      "Feature:ClusterTrustBundleProjection": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] SelfSubjectReview testing SSR in different API groups authentication/v1beta1 [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] SelfSubjectReview testing SSR in different API groups authentication/v1 [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] SelfSubjectReview should support SelfSubjectReview API operations [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts no secret-based service account token should be auto-generated [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should mount an API token into pods [Conformance] [Disabled:Broken] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should allow opting out of API token automount [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should mount projected service account token [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should run through the lifecycle of a ServiceAccount [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should guarantee kube-root-ca.crt exist in any namespace [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should update a ServiceAccount [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] SubjectReview should support SubjectReview API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: udp [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: sctp [LinuxOnly] [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:SCTPConnectivity": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: sctp [LinuxOnly] [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:SCTPConnectivity": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API [Serial] [Disruptive] [Feature:EphemeralStorage] Downward API tests for local ephemeral storage should provide container's limits.ephemeral-storage and requests.ephemeral-storage as env vars [Suite:k8s]",
+    "labels": {
+      "Disruptive": {},
+      "Feature:EphemeralStorage": {},
+      "Serial": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API [Serial] [Disruptive] [Feature:EphemeralStorage] Downward API tests for local ephemeral storage should provide default limits.ephemeral-storage from node allocatable [Suite:k8s]",
+    "labels": {
+      "Disruptive": {},
+      "Feature:EphemeralStorage": {},
+      "Serial": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is root [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is non-root [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] nonexistent volume subPath should have the correct mode and owner using FSGroup [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] files with FSGroup ownership should support (root,0644,tmpfs) [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] volume on default medium should have the correct mode using FSGroup [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] volume on tmpfs should have the correct mode using FSGroup [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes pod should support shared volumes between containers [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes pod should support memory backed volumes of specified size [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] HostPath should give a volume the correct mode [LinuxOnly] [NodeConformance] [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] HostPath should support r/w [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] HostPath should support subPath [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected combined should project all components that make up the projection API [Projection] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap binary data should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be immutable if `immutable` field is set [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap Should fail non-optional pod creation due to configMap object does not exist [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide podname only [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should update labels on modification [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should update annotations on modification [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide container's cpu limit [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide container's memory limit [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide container's cpu request [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide container's memory request [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap Should fail non-optional pod creation due to configMap object does not exist [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide podname only [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should update labels on modification [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should update annotations on modification [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide container's cpu limit [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide container's memory limit [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide container's cpu request [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide container's memory request [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable from pods in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable from pods in volume with mappings [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret Should fail non-optional pod creation due to secret object does not exist [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret Should fail non-optional pod creation due to the key in the secret object does not exist [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be consumable from pods in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be consumable from pods in volume with mappings [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be immutable if `immutable` field is set [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets Should fail non-optional pod creation due to secret object does not exist [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets Should fail non-optional pod creation due to the key in the secret object does not exist [Slow] [Suite:k8s]",
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Volumes NFSv4 should be mountable for NFSv4 [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Volumes NFSv3 should be mountable for NFSv3 [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-storage": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] Logging soak [Performance] [Slow] [Disruptive] should survive logging 1KB every 1s seconds, for a duration of 2m0s [Serial] [Suite:k8s]",
+    "labels": {
+      "Disruptive": {},
+      "Slow": {},
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] MetricsGrabber should grab all metrics from API server. [Disabled:Broken] [Suite:k8s]",
+    "labels": {
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] MetricsGrabber should grab all metrics from a Kubelet. [Disabled:Broken] [Suite:k8s]",
+    "labels": {
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] MetricsGrabber should grab all metrics from a Scheduler. [Disabled:Broken] [Suite:k8s]",
+    "labels": {
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] MetricsGrabber should grab all metrics from a ControllerManager. [Disabled:Broken] [Suite:k8s]",
+    "labels": {
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] MetricsGrabber should grab all metrics slis from API server. [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] Events should manage the lifecycle of an event [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] Events should delete a collection of events [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] Events API should ensure that an event can be fetched, patched, deleted, and listed [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] Events API should delete a collection of events [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "labels": {
+      "Conformance": {},
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-instrumentation] Metrics should grab all metrics from kubelet /metrics/resource endpoint [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "sig-instrumentation": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should support a 'default-deny-ingress' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should support a 'default-deny-all' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic from pods within server namespace based on PodSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow ingress traffic for a target [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow ingress traffic from pods in all namespaces [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector with MatchExpressions [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector or NamespaceSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "labels": {
+      "Feature:NetworkPolicy": {},
+      "sig-network": {}
+    },
+    "resources": {
+      "isolation": {}
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on Multiple PodSelectors and NamespaceSelectors [Feature:NetworkPolicy]
[Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on any PodSelectors [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on Ports [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should support allow-all policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from namespace on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow egress access on one named port 
[Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce updated policy [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from updated namespace [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from updated pod [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should deny ingress from pods on other namespaces [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should deny ingress access to updated pod [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should deny egress from pods based on PodSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should deny egress from all pods in a namespace [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should work with Ingress, Egress specified together [Feature:NetworkPolicy] 
[Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should support denying of egress traffic on the client side (even if the server explicitly allows this traffic) [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce ingress policy allowing any port traffic to a server on a specific protocol [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should stop enforcing policies after they are deleted [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should not mistakenly treat 'protocol: SCTP' as 'protocol: TCP', even if the plugin doesn't support SCTP [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should properly isolate pods that are selected by a policy allowing SCTP, even if the plugin doesn't support SCTP [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should not allow access by TCP when a policy specifies only UDP [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic based on NamespaceSelector with MatchLabels using default ns label [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions using default ns label [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should support a 'default-deny-ingress' policy [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should enforce policy based on Ports [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should support a 'default-deny-ingress' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should enforce policy based on Ports [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:NetworkPolicy": {}, + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol API should support creating NetworkPolicy API 
operations [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol API should support creating NetworkPolicy API with endport field [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet registers plugin [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must retry NodePrepareResources [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must not run a pod if a claim is not ready [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must unprepare resources for force-deleted pod [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must call NodePrepareResources even if not used by any container [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must map configs and devices to the right containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports claim and class parameters [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports reusing resources [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] 
on single node supports sharing a claim concurrently [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports sharing a claim sequentially [Slow] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node retries pod scheduling after creating device class [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node retries pod scheduling after updating device class [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node runs a pod without a generated resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports simple pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports inline claim referenced by multiple containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports simple pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA 
[Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports external claim referenced by multiple pods [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports external claim referenced by multiple containers of multiple pods [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports init containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node removes reservation from claim when pod is done [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node deletes generated claims when pod is done [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node does not delete generated claims when pod is restarting [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node must deallocate after use [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on multiple nodes with network-attached resources schedules onto different nodes [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on multiple nodes with network-attached resources [Serial] [Disruptive] [Slow] must deallocate on non graceful node shutdown [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "Serial": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on multiple nodes reallocation works [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on multiple nodes with node-local resources uses all resources [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports claim and class parameters [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports reusing resources [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports sharing a claim concurrently [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports sharing a claim sequentially [Slow] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node retries pod scheduling after creating device class [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node retries pod scheduling after updating device class [Disabled:Alpha] [Suite:k8s]", + "labels": { + 
"Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node runs a pod without a generated resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports simple pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports inline claim referenced by multiple containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports simple pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports external claim referenced by multiple pods [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports external claim referenced by multiple containers of multiple pods [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports init containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node removes reservation from claim when pod is done [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node deletes generated claims when pod is done [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node does not delete generated claims when pod is restarting [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node must deallocate after use [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on multiple nodes with different ResourceSlices keeps pod pending because of CEL runtime errors [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on multiple nodes with node-local resources uses all resources [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster support validating admission policy for admin access [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster truncates the name of a generated resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster supports count/resourceclaims.resource.k8s.io ResourceQuota [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports scheduled pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports scheduled pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports simple pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports inline claim referenced by multiple containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports simple pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports external claim referenced by multiple pods [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports external claim referenced by multiple containers of multiple pods [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports init containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor removes reservation from claim when pod is done [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor deletes generated claims when 
pod is done [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor does not delete generated claims when pod is restarting [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor must deallocate after use [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports scheduled pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports scheduled pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports simple pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports inline claim referenced by multiple containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports simple pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports external claim referenced by multiple pods [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports external claim referenced by multiple containers of multiple pods [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports init containers [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor removes reservation from claim when pod is done [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor deletes generated claims when pod is done [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor does not delete generated claims when pod is restarting [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor must deallocate after use [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with structured parameters must apply per-node permission checks [Disabled:Alpha] [Suite:k8s]", + "labels": { + 
"Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with structured parameters must manage ResourceSlices [Slow] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] multiple drivers using only drapbv1alpha3 work [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should be consumable via environment variable [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should be consumable via the environment [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should fail to create ConfigMap with empty key [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should update ConfigMap successfully [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should run through a ConfigMap lifecycle [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should be consumable as environment variable names when configmap keys start with a digit [Feature:RelaxedEnvironmentVariableValidation] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:RelaxedEnvironmentVariableValidation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with a failing exec liveness probe that took longer than the timeout [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with a local redirect http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted with a non-local redirect http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted startup probe fails [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted by liveness probe because startup probe delays it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted by liveness probe after startup probe enables it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be ready immediately after startupProbe succeeds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should override timeoutGracePeriodSeconds when LivenessProbe field is set [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should override timeoutGracePeriodSeconds when StartupProbe field is set [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted with a GRPC liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with a GRPC liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing 
container should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should mark readiness on pods to false and disable liveness probes while pod is in progress of terminating [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container with readiness probe should not be ready before initial delay and never restart [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container with readiness probe that fails should never be ready and never restart [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a exec \"cat /tmp/health\" liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a /healthz http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a tcp:8080 liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] 
[Feature:SidecarContainers] Probing restartable init container should have monotonically increasing restart count [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a /healthz http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a failing exec liveness probe that took longer than the timeout [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a local redirect http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a non-local redirect http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted startup probe fails [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + 
"sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted by liveness probe because startup probe delays it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted by liveness probe after startup probe enables it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be ready immediately after startupProbe succeeds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should override timeoutGracePeriodSeconds when LivenessProbe field is set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should override timeoutGracePeriodSeconds when StartupProbe field is set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a GRPC liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a GRPC liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should mark readiness on pods to false while pod is in progress 
of terminating when a pod has a readiness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should mark readiness on pods to false and disable liveness probes while pod is in progress of terminating [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Containers should use the image defaults if command and args are blank [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Containers should be able to override the image's default arguments (container cmd) [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Containers should be able to override the image's default command (container entrypoint) [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Containers should be able to override the image's default command and arguments [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide host IP as an env var [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide host IP and pod IP as an env var if pod uses host network [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide 
container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide pod UID as env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPIHugePages] Downward API tests for hugepages should provide container's limits.hugepages-\u003cpagesize\u003e and requests.hugepages-\u003cpagesize\u003e as env vars [Suite:k8s]", + "labels": { + "Disruptive": {}, + "NodeFeature:DownwardAPIHugePages": {}, + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPIHugePages] Downward API tests for hugepages should provide default limits.hugepages-\u003cpagesize\u003e from node allocatable [Suite:k8s]", + "labels": { + "Disruptive": {}, + "NodeFeature:DownwardAPIHugePages": {}, + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Ephemeral Containers [NodeConformance] will start an ephemeral container in an existing pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Ephemeral Containers [NodeConformance] should update the ephemeral containers in an existing pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should allow composing env vars into new env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should allow substituting values in a container's command [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should allow substituting values in a container's args [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should allow substituting values in a volume subpath [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should fail substituting values in a volume subpath with backticks [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should fail substituting values in a volume subpath with absolute path [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should succeed in writing subpaths in container [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion allow almost all printable ASCII characters as environment variable names [Feature:RelaxedEnvironmentVariableValidation] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:RelaxedEnvironmentVariableValidation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ImageCredentialProvider [Feature:KubeletCredentialProviders] should be able to create pod with image credentials fetched from external credential provider [Disabled:RebaseInProgress] [Suite:k8s]", + "labels": { + "Feature:KubeletCredentialProviders": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] InitContainer [NodeConformance] should invoke init containers on a RestartNever pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] InitContainer [NodeConformance] should invoke init containers on a RestartAlways pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + 
"Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] InitContainer [NodeConformance] should not start app containers if init containers fail on a RestartAlways pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] InitContainer [NodeConformance] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet when scheduling a busybox command in a pod should print the output to logs [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet when scheduling a busybox command that always fails in a pod should have an terminated reason [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet when scheduling a busybox command that always fails in a pod should be possible to delete [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet when scheduling an agnhost Pod with hostAliases should write entries to /etc/hosts [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet when scheduling a read only busybox container should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet with pods in a privileged namespace when scheduling an agnhost Pod with hostAliases and hostNetwork should write entries to /etc/hosts when hostNetwork is enabled [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] KubeletManagedEtcHosts 
should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Lease lease API should be available [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart exec hook properly [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop exec hook properly [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart http hook properly [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart https hook properly [MinimumKubeletVersion:1.23] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop http hook properly [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop https hook properly [MinimumKubeletVersion:1.23] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart exec hook properly [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop exec hook properly [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart http hook properly [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart https hook properly [MinimumKubeletVersion:1.23] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop http hook properly [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop https hook properly [MinimumKubeletVersion:1.23] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action valid prestop hook using sleep action [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:PodLifecycleSleepAction": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action reduce GracePeriodSeconds during runtime [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:PodLifecycleSleepAction": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action ignore terminated container [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "Feature:PodLifecycleSleepAction": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NodeLease NodeLease the kubelet should create and update a lease in the kube-node-lease namespace [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NodeLease NodeLease should have OwnerReferences set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NodeLease NodeLease the kubelet should report node status infrequently [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PodOSRejection [NodeConformance] Kubelet should reject pod when the node OS doesn't match pod's OS [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should get a host IP [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should be submitted and removed [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should be updated [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should contain environment variables for services [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should support remote command execution over websockets [NodeConformance] [Conformance] [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, 
+ "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should support retrieving logs from the container over websockets [NodeConformance] [Conformance] [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should have their auto-restart back-off timer reset on image update [Slow] [NodeConformance] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should cap back-off at MaxContainerBackOff [Slow] [NodeConformance] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should support pod readiness gates [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should delete a collection of pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should run through the lifecycle of Pods and PodStatus [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should patch a pod status [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PodTemplates should run the lifecycle of PodTemplates [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PodTemplates should delete a collection of pod templates [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PodTemplates should replace a pod template [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PrivilegedPod [NodeConformance] should enable privileged commands [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test when starting a container that exits should run with the expected status [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test on terminated container should report termination message if TerminationMessagePath is set [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test on terminated container should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test on terminated container should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test on terminated container should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test on terminated container should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test when running a container with a new image should not be able to pull image from invalid registry [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test when running a container with a new image should be able to pull image [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test when running a container with a new image should not be able to pull from private registry without secret [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test when running a container with a new image should be able to pull from private registry with secret [NodeConformance] [Disabled:Broken] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should reject a Pod requesting a non-existent RuntimeClass [NodeConformance] [Conformance] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler] [Disabled:Broken] [Suite:k8s]", + "labels": { + "NodeFeature:RuntimeHandler": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler] [Disabled:Broken] [Suite:k8s]", + "labels": { + "NodeFeature:RuntimeHandler": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should reject a Pod requesting a deleted RuntimeClass [NodeConformance] [Conformance] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should support RuntimeClasses API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Secrets should be consumable from pods in env vars [NodeConformance] [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Secrets should be consumable via the environment [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Secrets should fail to create secret due to empty secret key [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Secrets should patch a secret [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Secrets should be consumable as environment variable names when secret keys start with a digit [Feature:RelaxedEnvironmentVariableValidation] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:RelaxedEnvironmentVariableValidation": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with HostUsers must create the user namespace if set to false [LinuxOnly] [Feature:UserNamespacesSupport] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:UserNamespacesSupport": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with HostUsers must not create the user namespace if set to true [LinuxOnly] [Feature:UserNamespacesSupport] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:UserNamespacesSupport": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with HostUsers should mount all volumes with proper permissions with hostUsers=false [LinuxOnly] [Feature:UserNamespacesSupport] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:UserNamespacesSupport": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with HostUsers should set FSGroup to user inside the container with hostUsers=false [LinuxOnly] [Feature:UserNamespacesSupport] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:UserNamespacesSupport": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsUser should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + 
"sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsUser should run the container with uid 0 [LinuxOnly] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsNonRoot should run with an explicit non-root user ID [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsNonRoot should not run with an explicit root user ID [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsNonRoot should run with an image specified user ID [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsNonRoot should not run without a specified user ID [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with readOnlyRootFilesystem should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with readOnlyRootFilesystem should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with privileged should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with privileged should run the container as privileged when true [LinuxOnly] [NodeFeature:HostAccess] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeFeature:HostAccess": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when true [LinuxOnly] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] User Namespaces for Pod Security Standards [LinuxOnly] with UserNamespacesSupport and UserNamespacesPodSecurityStandards enabled should allow pod [Feature:UserNamespacesPodSecurityStandards] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:UserNamespacesPodSecurityStandards": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should support sysctls [MinimumKubeletVersion:1.21] [Environment:NotInUserNS] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Environment:NotInUserNS": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should not launch unsafe, but not explicitly enabled sysctls on the node [MinimumKubeletVersion:1.21] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should support sysctls with slashes as separator [MinimumKubeletVersion:1.23] [Environment:NotInUserNS] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Environment:NotInUserNS": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] LimitRange should create a LimitRange with defaults and ensure pod has 
those defaults applied. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] LimitRange should list, patch and delete a LimitRange by collection [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:LocalStorageCapacityIsolation": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates pod overhead is considered along with resource limits of pods that are allowed to run verify pod overhead is accounted for [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates resource limits of pods that are allowed to run [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if not matching [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if matching [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeAffinity is respected if not matching [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that required NodeAffinity setting is respected if matching [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if matching [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} 
+ }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if not matching [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that there is no conflict between pods with same hostPort but different hostIP and protocol [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance] [Slow] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] PodTopologySpread Filtering validates 4 pods with MaxSkew=1 are evenly distributed into 2 nodes [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates Pods with non-empty schedulingGates are blocked on scheduling [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] when PVC has node-affinity to non-existent/illegal nodes, the pod should be scheduled normally if suitable nodes exist [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPreemption [Serial] validates basic preemption works [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPreemption [Serial] validates lower priority pod preemption by critical pod [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPreemption [Serial] validates pod disruption condition is added to the preempted pod [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-scheduling] SchedulerPreemption [Serial] PodTopologySpread Preemption validates proper pods are preempted [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath runs ReplicaSets to verify preemption running path [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPriorities [Serial] Pod should be scheduled to node that don't match the PodAntiAffinity terms [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPriorities [Serial] Pod should be preferably scheduled to nodes pod can tolerate [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPriorities [Serial] PodTopologySpread Scoring validates pod should be preferably scheduled to node which makes the matching pods more evenly distributed [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] Multi-AZ Clusters should spread the pods of a service across zones [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] Multi-AZ Clusters should spread the pods of a replication controller across zones [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should schedule multiple jobs concurrently [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should not schedule jobs when suspended [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-apps] CronJob should not schedule new jobs when ForbidConcurrent [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should replace jobs when ReplaceConcurrent [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should be able to schedule after more than 100 missed schedule [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should not emit unexpected warnings [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should remove from active list jobs that have been deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should delete successful finished jobs with limit of one successful job [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should delete failed finished jobs with limit of one job [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should support timezone [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should support CronJob API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DaemonRestart [Disruptive] Controller Manager should not create/delete replicas across restart [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DaemonRestart [Disruptive] Scheduler should continue assigning pods to nodes across restart [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DaemonRestart [Disruptive] Kubelet should not restart containers across restart [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DaemonRestart [Disruptive] Kube-proxy should recover after being killed accidentally [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should run a job to completion when tasks succeed [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should allow to use the pod failure policy on exit code to fail the job early [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should allow to use the pod failure policy to not count the failure towards the backoffLimit [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should allow to use a pod failure policy to ignore failure for an evicted pod; matching on the exit code [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should allow to use a pod failure policy to ignore failure for an evicted pod; matching on the DisruptionTarget condition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should not create pods when created in suspend state [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should delete pods when suspended [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should recreate pods only after they have failed if pod replacement policy is set to Failed [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should create pods for an Indexed job with completion indexes and specified hostname [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job with successPolicy should succeeded when all indexes succeeded [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-apps] Job with successPolicy succeededIndexes rule should succeeded even when some indexes remain pending [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job with successPolicy succeededCount rule should succeeded even when some indexes remain pending [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should execute all indexes despite some failing when using backoffLimitPerIndex [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should terminate job execution when the number of failed indexes exceeds maxFailedIndexes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should mark indexes as failed when the FailIndex action is matched in podFailurePolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should remove pods when job is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should run a job to completion when tasks sometimes fail and are not locally restarted [Flaky] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should fail when exceeds active deadline [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should delete a job [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should adopt matching orphans and release non-matching pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should fail to exceed backoffLimit [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should run a job to completion with CPU requests [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should apply changes to a job status [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should manage the lifecycle of a job [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should update the status ready field [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] TTLAfterFinished job should be deleted once it finishes after TTL seconds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ControllerRevision [Serial] should manage the lifecycle of a ControllerRevision [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController Listing PodDisruptionBudgets for all namespaces should list and delete a collection of PodDisruptionBudgets [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should create a PodDisruptionBudget [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should observe PodDisruptionBudget status updated [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should update/patch PodDisruptionBudget status [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should observe that the PodDisruptionBudget status is not 
updated for unmanaged pods [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: no PDB =\u003e should allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: too few pods, absolute =\u003e should not allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: enough pods, absolute =\u003e should allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: enough pods, replicaSet, percentage =\u003e should allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: too few pods, replicaSet, percentage =\u003e should not allow an eviction [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: maxUnavailable allow single eviction, percentage =\u003e should allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: maxUnavailable deny evictions, integer =\u003e should not allow an eviction [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should block an eviction until the PDB is updated to allow it [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should evict ready pods with Default UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should evict ready pods with IfHealthyBudget UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { 
+ "name": "[sig-apps] DisruptionController should evict ready pods with AlwaysAllow UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should not evict unready pods with Default UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should not evict unready pods with IfHealthyBudget UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should evict unready pods with AlwaysAllow UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should serve a basic image on each replica with a public image [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should serve a basic image on each replica with a private image [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should surface a failure condition on a common issue like exceeded quota [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should adopt matching pods on creation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should release no longer matching pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should test the lifecycle of a ReplicationController [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should get and update a ReplicationController scale [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + 
"sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should serve a basic image on each replica with a public image [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should serve a basic image on each replica with a private image [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should surface a failure condition on a common issue like exceeded quota [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should adopt matching pods on creation and release no longer matching pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet Replicaset should have a working scale subresource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet Replace and Patch tests [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should list and delete a collection of ReplicaSets [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should validate Replicaset Status endpoints [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should provide basic identity [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should adopt matching orphans and release non-matching pods [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should not deadlock when a pod's 
predecessor fails [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications with PVCs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications for partiton1 and delete pod-0 without failing container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications for partiton1 and delete pod-0 with failing container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should implement legacy replacement when the update strategy is OnDelete [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Should recreate evicted statefulset [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should have a working scale subresource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should list, patch and delete a collection of StatefulSets [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should validate Statefulset Status endpoints [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working zookeeper cluster [Suite:k8s]", + "labels": { + "Feature:StatefulSet": {}, + "Slow": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working redis cluster [Suite:k8s]", + "labels": { + "Feature:StatefulSet": {}, + "Slow": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working mysql cluster [Suite:k8s]", + "labels": { + "Feature:StatefulSet": {}, + "Slow": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working CockroachDB cluster [Suite:k8s]", + "labels": { + "Feature:StatefulSet": {}, + "Slow": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet MinReadySeconds should be honored when enabled [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet AvailableReplicas should get updated accordingly when MinReadySeconds is enabled [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs with a WhenDeleted policy [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs with a OnScaledown policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should not delete PVC with OnScaledown policy if another controller owns the PVC [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs after adopting pod (WhenDeleted) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs after adopting pod (WhenScaled) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should not delete PVCs when there is another controller [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Automatically recreate PVC for pending pod when PVC is missing PVC should be recreated when pod is pending due to missing PVC [Disruptive] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Setting .start.ordinal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Increasing .start.ordinal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Decreasing .start.ordinal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Removing .start.ordinal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should run and stop simple 
daemon [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should run and stop complex daemon [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should run and stop complex daemon with node affinity [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should retry creating failed daemon pods [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should not update pod when spec was updated and update strategy is OnDelete [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should update pod when spec was updated and update strategy is RollingUpdate [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should rollback without unnecessary restarts [Conformance] [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should surge pods onto nodes when spec was updated and update strategy is RollingUpdate [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should list and delete a collection of DaemonSets [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should verify changes to a daemon set status [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment deployment reaping should cascade to its replica sets and pods 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment RollingUpdateDeployment should delete old pods and create new ones [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment RecreateDeployment should delete old pods and create new ones [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment deployment should delete old replica sets [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment deployment should support rollover [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment iterative rollouts should eventually progress [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment test Deployment ReplicaSet orphaning and adoption regarding controllerRef [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment Deployment should have a working scale subresource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment deployment should support proportional scaling [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment should not disrupt a cloud load-balancer's connectivity during rollout [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment should run the lifecycle of a Deployment [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment should validate Deployment Status endpoints [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling Autoscaling a service from 1 pod and 3 nodes to 8 pods and \u003e=4 nodes takes less than 15 minutes [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale up at all [Feature:ClusterAutoscalerScalability1] [Suite:k8s]", + "labels": { + "Feature:ClusterAutoscalerScalability1": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale up twice [Feature:ClusterAutoscalerScalability2] [Suite:k8s]", + "labels": { + "Feature:ClusterAutoscalerScalability2": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale down empty nodes [Feature:ClusterAutoscalerScalability3] [Suite:k8s]", + "labels": { + "Feature:ClusterAutoscalerScalability3": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4] [Suite:k8s]", + "labels": { + "Feature:ClusterAutoscalerScalability4": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5] [Suite:k8s]", + "labels": { + "Feature:ClusterAutoscalerScalability5": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6] [Suite:k8s]", + "labels": { + "Feature:ClusterAutoscalerScalability6": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp] 
[Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should scale up GPU pool from 0 [GpuType:] [Feature:ClusterSizeAutoscalingGpu] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingGpu": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should scale up GPU pool from 1 [GpuType:] [Feature:ClusterSizeAutoscalingGpu] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingGpu": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should not scale GPU pool up if pod does not require GPUs [GpuType:] [Feature:ClusterSizeAutoscalingGpu] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingGpu": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should scale down GPU pool from 1 [GpuType:] [Feature:ClusterSizeAutoscalingGpu] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingGpu": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pods are pending due to host port conflict 
[Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should correctly scale down after a node is not needed when there is non autoscaled pool [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] 
should be able to scale down when rescheduling a pod is required and pdb allows for it [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining multiple pods one by one as dictated by pdb [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group up from 0 [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0 [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up when unprocessed pod is created and is going to be unschedulable [Feature:ClusterScaleUpBypassScheduler] [Suite:k8s]", + "labels": { + "Feature:ClusterScaleUpBypassScheduler": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when unprocessed pod is created and is going to be schedulable [Feature:ClusterScaleUpBypassScheduler] [Suite:k8s]", + "labels": { + "Feature:ClusterScaleUpBypassScheduler": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when unprocessed pod is created and scheduler is not specified to be bypassed [Feature:ClusterScaleUpBypassScheduler] [Suite:k8s]", + "labels": { + "Feature:ClusterScaleUpBypassScheduler": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] DNS horizontal autoscaling [Serial] [Slow] [KubeUp] [sig-cloud-provider-gcp] kube-dns-autoscaler should scale kube-dns pods when cluster size changed [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "KubeUp": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] DNS horizontal autoscaling kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation 
[Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod using Average Utilization for aggregation [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods and verify decision stability [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 5 pods to 3 pods and then from 3 pods to 
1 pod and verify decision stability [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 1 pod to 2 pods [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light [Slow] Should scale from 2 pods to 1 pod [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods on a busy application with an idle sidecar container [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should not scale up on a busy sidecar with an idle application [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) CustomResourceDefinition Should scale with a CRD targetRef [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for 
aggregation [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with short downscale stabilization window should scale down soon after the stabilization period [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with long upscale stabilization window should scale up only after the stabilization period [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale up [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale down [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale up no more than given number of Pods per minute [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale down no more than given number of Pods per minute [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale up no more than given percentage of current Pods per minute [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale down no more than given percentage of current Pods per minute [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range over two stabilization windows [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range with stabilization window and pod limit rate [Suite:k8s]", + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a NodePort service [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a ClusterIP service [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a ClusterIP service and client is hostNetwork [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Conntrack should be able to preserve UDP traffic when initial unready endpoints get ready [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Conntrack proxy implementation should not be vulnerable to the invalid conntrack state bug [Privileged] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for the cluster [Conformance] [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for the cluster 
[Provider:GCE] [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should resolve DNS of partial qualified names for the cluster [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for services [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for pods for Hostname [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for pods for Subdomain [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for ExternalName services [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should support configurable pod DNS nameservers [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should support configurable pod resolv.conf [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should work with the pod containing more than 6 DNS search paths and longer than 256 search list characters [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS HostNetwork should 
resolve DNS of partial qualified names for services on hostNetwork pods with dnsPolicy: ClusterFirstWithHostNet [LinuxOnly] [Disabled:RebaseInProgress] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS HostNetwork spec.Hostname field is not silently ignored and is used for hostname for a Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS HostNetwork spec.Hostname field is silently ignored and the node hostname is used when hostNetwork is set to true for a Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS configMap nameserver Change stubDomain should be able to change stubDomain configuration [Slow] [Serial] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS configMap nameserver Forward PTR lookup should forward PTR records lookup to upstream nameserver [Slow] [Serial] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS configMap nameserver Forward external name lookup should forward externalname lookup to upstream nameserver [Slow] [Serial] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:PerformanceDNS] [Serial] Should answer DNS query for maximum number of services per cluster [Slow] [Suite:k8s]", + "labels": { + "Feature:PerformanceDNS": {}, + "Serial": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should have ipv4 and ipv6 internal node ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create pod, add ipv6 and ipv4 ip to pod ips [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create pod, add ipv6 and ipv4 ip to host ips [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should be able 
to reach pod on ipv4 and ipv6 ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create a single stack service with cluster ip from primary service range [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create service with ipv4 cluster ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create service with ipv6 cluster ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create service with ipv4,v6 cluster ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create service with ipv6,v4 cluster ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: sctp [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for node-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for node-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for endpoint-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for endpoint-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should update endpoints: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should update endpoints: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for client IP based session affinity: http [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for client IP based session affinity: udp [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should be able to handle large requests: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should be able to handle large requests: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for service endpoints using hostNetwork [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should have Endpoints and EndpointSlices pointing to API Server [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should create and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should create Endpoints and EndpointSlices for Pods matching a Service [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should support creating EndpointSlice API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should support a Service with multiple ports specified in multiple EndpointSlices [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should support a Service with multiple endpoint IPs specified in multiple EndpointSlices [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSliceMirroring should mirror a custom Endpoints resource through create update and delete [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSliceMirroring should mirror a custom Endpoint with multiple subsets and same IP address [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] ClusterDns [Feature:Example] should create pod that uses dns [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Example": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-network] CVE-2021-29923 IPv4 Service Type ClusterIP with leading zeros should work interpreted as decimal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Ingress API should support creating Ingress API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] IngressClass [Feature:Ingress] should set default value on new IngressClass [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Ingress": {}, + "Serial": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] IngressClass [Feature:Ingress] should not set default value if no default IngressClass [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Ingress": {}, + "Serial": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] IngressClass [Feature:Ingress] should choose the one with the later CreationTimestamp, if equal the one with the lower name when two ingressClasses are marked as default [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Ingress": {}, + "Serial": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] IngressClass [Feature:Ingress] should allow IngressClass to have Namespace-scoped parameters [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Ingress": {}, + "Serial": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] IngressClass API should support creating IngressClass API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] KubeProxy should set TCP CLOSE_WAIT timeout [Privileged] [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] KubeProxy should update metric for tracking accepted packets destined for localhost nodeports [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers ExternalTrafficPolicy: Local [Feature:LoadBalancer] [Slow] should work for type=LoadBalancer [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers ExternalTrafficPolicy: Local [Feature:LoadBalancer] [Slow] should only target nodes with endpoints [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers ExternalTrafficPolicy: Local [Feature:LoadBalancer] [Slow] should work from pods [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers ExternalTrafficPolicy: Local [Feature:LoadBalancer] [Slow] should handle updates to ExternalTrafficPolicy field [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking IPerf2 [Feature:Networking-Performance] should run iperf2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Networking-Performance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] NoSNAT Should be able to send traffic between Pods without SNAT [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Connectivity Pod Lifecycle should be able to connect from a Pod to a terminating Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Connectivity Pod Lifecycle should be able to connect to other Pod from a terminating Pod [Disabled:RebaseInProgress] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Proxy version v1 should proxy logs on node with explicit kubelet port using proxy subresource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Proxy version v1 should proxy logs on node using proxy subresource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Proxy version v1 should proxy through a service and a pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Proxy version v1 A set of valid responses are returned for both pod and service ProxyWithPath [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Proxy version v1 A set of valid responses are returned for both pod and service Proxy [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to change the type and ports of a TCP service [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to change the type and ports of a UDP service [Slow] [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should only allow access from service loadbalancer source ranges [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should have session affinity work for LoadBalancer service with Local traffic policy [Slow] [LinuxOnly] [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to switch session affinity for LoadBalancer service with Local traffic policy [Slow] [LinuxOnly] [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should have session affinity work for LoadBalancer service with Cluster traffic policy [Slow] [LinuxOnly] [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to switch session affinity for LoadBalancer service with Cluster traffic policy [Slow] [LinuxOnly] [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should handle load balancer cleanup finalizer for service [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to create LoadBalancer Service without NodePort and change it [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on different nodes [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on the same nodes [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] 
[Skipped:ovirt] [Skipped:vsphere] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should not have connectivity disruption during rolling update with externalTrafficPolicy=Cluster [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should not have connectivity disruption during rolling update with externalTrafficPolicy=Local [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv4] [Skipped:Disconnected] [Skipped:Proxy] [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Networking-IPv4": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv6] [Experimental][LinuxOnly] [Disabled:Broken] [Skipped:Disconnected] [Skipped:Proxy] [Skipped:azure] [Suite:k8s]", + "labels": { + "Feature:Networking-IPv6": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should provider Internet connection for containers using DNS [Feature:Networking-DNS] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Networking-DNS": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should provide unchanging, static URL paths for kubernetes api services [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should check kube-proxy urls [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for pod-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for pod-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} 
+ }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for pod-Service: sctp [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for node-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for node-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for node-Service: sctp [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: sctp [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for multiple endpoint-Services with same selector [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should update endpoints: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should update endpoints: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should update nodePort: http [Slow] [Suite:k8s]", + 
"labels": { + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should support basic nodePort: udp functionality [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should update nodePort: udp [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for client IP based session affinity: http [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for client IP based session affinity: udp [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should be able to handle large requests: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should be able to handle large requests: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for service endpoints using hostNetwork [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should recreate its iptables rules if they are deleted [Disruptive] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should allow creating a Pod with an SCTP HostPort [LinuxOnly] [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should provide secure master service [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should serve a basic endpoint from pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + 
"sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should serve multiport endpoints from pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be updated after adding or deleting ports [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should preserve source pod IP for traffic thru service cluster IP [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should allow pods to hairpin back to themselves through services [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to up and down services [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should work after the service has been recreated [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should work after restarting kube-proxy [Disruptive] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should work after restarting apiserver [Disruptive] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to create a functioning NodePort service [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be possible to connect to a service via ExternalIP when the external IP is not assigned to a node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to update service type to NodePort listening on same port number but different protocols [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-network] Services should be able to change the type from ExternalName to ClusterIP [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to change the type from ExternalName to NodePort [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to change the type from ClusterIP to ExternalName [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to change the type from NodePort to ExternalName [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should prevent NodePort collisions [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should check NodePort out-of-range [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should release NodePorts on delete [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should create endpoints for unready pods [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should not be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is false [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should have session affinity work for NodePort service [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should have session affinity timeout work for NodePort service [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should implement service.kubernetes.io/service-proxy-name [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should implement service.kubernetes.io/headless [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be rejected when no endpoints exist [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be rejected for evicted pods (no endpoints exist) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should respect internalTrafficPolicy=Local Pod to Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should respect internalTrafficPolicy=Local Pod (hostNetwork: true) to Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should respect internalTrafficPolicy=Local Pod and Node, to Pod (hostNetwork: true) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should support externalTrafficPolicy=Local for type=NodePort [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should fail health check node port if there are only terminating endpoints [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should fallback to terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Cluster [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should fallback to local terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Local [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should fallback to terminating endpoints when there are no ready endpoints with externallTrafficPolicy=Cluster [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should fallback to local terminating endpoints when there are no ready endpoints with externalTrafficPolicy=Local [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should find a service from listing all namespaces [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should test the lifecycle of an Endpoint [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should complete a service status lifecycle [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should delete a collection of services [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + 
"labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should serve endpoints on same port and different protocols [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should serve endpoints on same port and different protocol for internal traffic on Type LoadBalancer [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should allow creating a basic SCTP service with pod and endpoints [LinuxOnly] [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:ServiceCIDRs] should create Services and servce on different Service CIDRs [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:ServiceCIDRs": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Service endpoints latency should not be very high [Conformance] [Serial] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:Topology Hints] should distribute endpoints evenly [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:Topology Hints": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:Traffic Distribution] when Service has trafficDistribution=PreferClose should route traffic to an endpoint that is close to the client [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Traffic Distribution": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] AppArmor load AppArmor profiles should enforce an AppArmor profile specified on the pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] AppArmor load AppArmor profiles should enforce an AppArmor profile specified on the container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] AppArmor load AppArmor profiles should enforce an AppArmor profile specified in annotations [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-node] AppArmor load AppArmor profiles can disable an AppArmor profile, using unconfined [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] crictl should be able to run crictl on the node [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Events should be sent by kubelets and the scheduler about pods scheduling and running [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:Example] Liveness liveness pods should be automatically restarted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Example": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:Example] Secret should create a pod that reads a secret [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Example": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:Example] Downward API should create a pod that prints his name and namespace [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Example": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:GPUDevicePlugin] Sanity test for Nvidia Device should run nvidia-smi cli [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:GPUDevicePlugin": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:GPUDevicePlugin] Sanity test for Nvidia Device should run gpu based matrix multiplication [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:GPUDevicePlugin": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet Clean up pods on node kubelet should be able to delete 10 pods per node in 1m0s. [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet host cleanup with volume mounts [HostCleanup] [Flaky] Host cleanup after disrupting NFS volume [NFS] after stopping the nfs-server and deleting the (sleeping) client pod, the NFS mount and the pod's UID directory should be removed. 
[Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet host cleanup with volume mounts [HostCleanup] [Flaky] Host cleanup after disrupting NFS volume [NFS] after stopping the nfs-server and deleting the (active) client pod, the NFS mount and the pod's UID directory should be removed. [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the error with an empty --query option [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the kubelet logs [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the kubelet logs for the current boot [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the last three lines of the kubelet logs [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the kubelet logs for the current boot with the pattern container [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the kubelet logs since the current date and time [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should 
return the Microsoft-Windows-Security-SPP logs [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the last three lines of the Microsoft-Windows-Security-SPP logs [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the Microsoft-Windows-Security-SPP logs with the pattern Health [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet [Serial] [Slow] regular resource usage tracking [Feature:RegularResourceUsageTracking] resource tracking for 0 pods per node [Suite:k8s]", + "labels": { + "Feature:RegularResourceUsageTracking": {}, + "Serial": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet [Serial] [Slow] regular resource usage tracking [Feature:RegularResourceUsageTracking] resource tracking for 100 pods per node [Suite:k8s]", + "labels": { + "Feature:RegularResourceUsageTracking": {}, + "Serial": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet [Serial] [Slow] experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking] resource tracking for 100 pods per node [Suite:k8s]", + "labels": { + "Feature:ExperimentalResourceUsageTracking": {}, + "Serial": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Mount propagation should propagate mounts within defined scopes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NodeProblemDetector [NodeFeature:NodeProblemDetector] should run without error [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "NodeFeature:NodeProblemDetector": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod garbage collector [Feature:PodGarbageCollector] [Slow] should handle the creation of 1000 pods [Suite:k8s]", + "labels": { + "Feature:PodGarbageCollector": {}, + "Slow": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Serial] Pod InPlace Resize Container (scheduler-focused) [Feature:InPlacePodVerticalScaling] pod-resize-scheduler-tests 
[Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - increase CPU \u0026 memory [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - decrease CPU \u0026 memory [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - increase CPU \u0026 decrease memory [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - decrease CPU \u0026 increase memory [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, three containers (c1, c2, c3) - increase: CPU (c1,c3), memory (c2) ; decrease: CPU (c2), memory (c1,c3) [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease memory requests only [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease memory limits only [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase memory requests only [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container 
[Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase memory limits only [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease CPU requests only [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease CPU limits only [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase CPU requests only [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase CPU limits only [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease CPU requests and limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase CPU requests and limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease CPU requests and increase CPU limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase CPU requests and decrease CPU limits [Disabled:Alpha] 
[Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease memory requests and limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase memory requests and limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease memory requests and increase memory limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase memory requests and decrease memory limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease CPU requests and increase memory limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase CPU requests and decrease memory limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease memory requests and increase CPU limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase memory requests and decrease CPU limits [Disabled:Alpha] [Suite:k8s]", + "labels": { + 
"Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests - decrease memory request [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - increase CPU (NotRequired) \u0026 memory (RestartContainer) [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container - decrease CPU (RestartContainer) \u0026 memory (NotRequired) [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, three containers - increase c1 resources, no change for c2, decrease c3 resources (no net change for pod) [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, three containers - decrease c1 resources, increase c2 resources, no change for c3 (net increase for pod) [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, three containers - no change for c1, increase c2 resources, decrease c3 (net decrease for pod) [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] pod-resize-resource-quota-test [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] BestEffort pod - try requesting memory, expect error [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Delete Grace Period should be submitted and removed 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pods Set QOS Class should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pod Container Status should never report success for a pending container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pod Container Status should never report container start when an init container fails [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pod Container lifecycle should not create extra sandbox if all containers are done [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pod Container lifecycle evicted pods should be terminal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pod TerminationGracePeriodSeconds is negative pod with negative grace period [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PreStop should call prestop when killing a pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PreStop graceful pod terminated should wait until preStop hook completes the process [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should reject a Pod requesting a RuntimeClass with conflicting node selector [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with scheduling with taints [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with scheduling without taints 
[Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context when if the container's primary UID belongs to some groups in the image [LinuxOnly] should add pod.Spec.SecurityContext.SupplementalGroups to them [LinuxOnly] in resultant supplementary groups for the container processes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context [sig-node] SupplementalGroupsPolicy [Feature:SupplementalGroupsPolicy] when SupplementalGroupsPolicy was not set if the container's primary UID belongs to some groups in the image, it should add SupplementalGroups to them [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SupplementalGroupsPolicy": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context [sig-node] SupplementalGroupsPolicy [Feature:SupplementalGroupsPolicy] when SupplementalGroupsPolicy was set to Merge if the container's primary UID belongs to some groups in the image, it should add SupplementalGroups to them [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SupplementalGroupsPolicy": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context [sig-node] SupplementalGroupsPolicy [Feature:SupplementalGroupsPolicy] when SupplementalGroupsPolicy was set to Strict even if the container's primary UID belongs to some groups in the image, it should not add SupplementalGroups to them [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:SupplementalGroupsPolicy": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support container.SecurityContext.RunAsUser [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-node] Security Context should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support volume SELinux relabeling [Flaky] [LinuxOnly] [Suite:k8s]", + "labels": { + "Flaky": {}, + "LinuxOnly": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support volume SELinux relabeling when using hostIPC [Flaky] [LinuxOnly] [Suite:k8s]", + "labels": { + "Flaky": {}, + "LinuxOnly": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support volume SELinux relabeling when using hostPID [Flaky] [LinuxOnly] [Suite:k8s]", + "labels": { + "Flaky": {}, + "LinuxOnly": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support seccomp unconfined on the container [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support seccomp unconfined on the pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support seccomp runtime/default [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support seccomp default which is unconfined [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] SSH should SSH to all nodes and run commands [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Single Pod [Serial] evicts pods from tainted nodes [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Single Pod [Serial] doesn't evict pod with tolerations from tainted nodes [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-node] NoExecuteTaintManager Single Pod [Serial] eventually evict pod with finite tolerations from tainted nodes [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Single Pod [Serial] removing taint cancels eviction [Disruptive] [Conformance] [Skipped:SingleReplicaTopology] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Disruptive": {}, + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Single Pod [Serial] pods evicted from tainted nodes have pod disruption condition [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Multiple Pods [Serial] only evicts pods without tolerations from tainted nodes [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Multiple Pods [Serial] evicts pods with minTolerationSeconds [Disruptive] [Conformance] [Skipped:SingleReplicaTopology] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Disruptive": {}, + "Serial": {}, + "sig-node": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSIInlineVolumes should support CSIVolumeSource in Pod API [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSIInlineVolumes should run through the lifecycle of a CSIDriver [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, 
+ "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default 
fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath 
should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes 
[Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI 
Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] 
multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should 
concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod 
recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": 
{} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod 
recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single 
volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV 
(ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] 
[Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: 
csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + 
}, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volume-lifecycle-performance should provision volumes at scale within performance constraints [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should preempt lower priority pods using ReadWriteOncePod volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "MinimumKubeletVersion:1.27": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should block a second pod from using an in-use ReadWriteOncePod volume on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "MinimumKubeletVersion:1.27": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { 
+ "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] 
[Serial] [Testpattern: Inline-volume (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext4)] 
volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + 
"Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { 
+ "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] 
volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: 
pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] 
[Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, 
+ "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes 
[Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + 
"Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} 
+ }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: 
Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: 
pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to 
two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { 
+ "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: 
pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] 
[Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + 
"LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + 
"labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume 
[Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) 
(late-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) 
[Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support 
snapshotting of many volumes repeatedly [Slow] [Serial] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volume-lifecycle-performance should provision volumes at scale within performance constraints [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should preempt lower priority pods using ReadWriteOncePod volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "MinimumKubeletVersion:1.27": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should block a second pod from using an in-use ReadWriteOncePod volume on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "MinimumKubeletVersion:1.27": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI 
Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSIStorageCapacity should support 
CSIStorageCapacities API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Feature:Flexvolumes] Detaching volumes should not work when mount is in progress [Slow] [Suite:k8s]", + "labels": { + "Feature:Flexvolumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] EmptyDir wrapper volumes should not conflict [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] EmptyDir wrapper volumes should not cause race condition when used for configmaps [Serial] [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] EmptyDir wrapper volumes should not cause race condition when used for git_repo [Serial] [Slow] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : secret [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : configmap [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : projected [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Flexvolumes should be mountable when non-attachable [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Flexvolumes should be mountable when attachable [Feature:Flexvolumes] [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Flexvolumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Feature:Flexvolumes] Mounted flexvolume expand [Slow] Should verify mounted flex volumes can be resized [Suite:k8s]", + "labels": { + 
"Feature:Flexvolumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Feature:Flexvolumes] Mounted flexvolume volume expand [Slow] should be resizable when mounted [Suite:k8s]", + "labels": { + "Feature:Flexvolumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] GenericPersistentVolume [Disruptive] When kubelet restarts Should test that a file written to the mount before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] GenericPersistentVolume [Disruptive] When kubelet restarts Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] GenericPersistentVolume [Disruptive] When kubelet restarts Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should fail on mounting non-existent directory 'does-not-exist-dir' when HostPathType is HostPathDirectory [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should be able to mount directory 'adir' successfully when HostPathType is HostPathDirectory [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should be able to mount directory 'adir' successfully when HostPathType is HostPathUnset [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathFile [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathSocket [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathCharDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathBlockDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should fail on mounting non-existent file 'does-not-exist-file' when HostPathType is HostPathFile [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should be able to mount file 'afile' successfully when HostPathType is HostPathFile [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should be able to mount file 'afile' successfully when HostPathType is HostPathUnset [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathDirectory [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathSocket [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathCharDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathBlockDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should fail on mounting non-existent socket 'does-not-exist-socket' when HostPathType is HostPathSocket [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should be able to mount socket 'asocket' successfully when HostPathType is HostPathSocket [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should be able to mount socket 'asocket' successfully when HostPathType is HostPathUnset [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": 
{} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathDirectory [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathFile [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathCharDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathBlockDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting non-existent character device 'does-not-exist-char-dev' when HostPathType is HostPathCharDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should be able to mount character device 'achardev' successfully when HostPathType is HostPathCharDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should be able to mount character device 'achardev' successfully when HostPathType is HostPathUnset [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathDirectory [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathFile [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathSocket [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is 
HostPathBlockDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting non-existent block device 'does-not-exist-blk-dev' when HostPathType is HostPathBlockDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should be able to mount block device 'ablkdev' successfully when HostPathType is HostPathBlockDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should be able to mount block device 'ablkdev' successfully when HostPathType is HostPathUnset [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathDirectory [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathFile [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathSocket [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathCharDev [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext4)] volumes 
should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + 
}, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] 
volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the 
volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default 
fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted 
[LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + 
"labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel 
[Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the 
same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] 
[Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + 
"labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] 
[Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod 
recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] 
volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] 
[Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume 
contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext3)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: 
Pre-provisioned PV (ext3)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext3)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext4)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default 
fs)] subPath should support non-existent path [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: 
Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] 
[LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + 
"Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV 
(filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} 
+ }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV 
(block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "labels": { + 
"Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently 
access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to 
two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow 
expansion of pvcs without AllowVolumeExpansion property [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Disabled:Broken] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy 
(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for 
ephemeral pvcs [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Disabled:Broken] [Suite:k8s]", + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, 
+ { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] 
[Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", 
+ "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV 
(default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should 
support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is 
outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + 
"labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": 
{ + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: 
Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single 
read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently 
access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] 
[Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": 
{ + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] 
[Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + 
{ + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] 
volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] 
[LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", 
+ "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV 
(default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount 
[LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if 
subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX 
mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: 
Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block 
volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + 
"Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": 
{} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods 
should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume 
definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} 
+ }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: 
Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
emptydir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath 
should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] 
[Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] 
[Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to 
two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod 
recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": 
{} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: 
Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume 
and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet 
is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] 
[Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext4)] volumes should store 
data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + 
{ + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { 
+ "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": 
{} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + 
}, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, 
+ "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the 
volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { 
+ "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before 
kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + 
"labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: 
Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV 
(default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: 
Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
vsphere] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using 
directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] 
[Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should 
mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { 
+ "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + 
"Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { 
+ "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to 
two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic 
PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data 
across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] 
[Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Disabled:Unsupported] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Disabled:Unsupported] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Disabled:Unsupported] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Disabled:Unsupported] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": 
{} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] 
ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumes should 
store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { 
+ "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] 
[LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + 
}, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} 
+ }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] 
[Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in 
parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + 
}, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] 
[Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] 
should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + 
"labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access 
to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": 
{}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should 
access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} 
+ }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: azure-file] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with 
mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath 
should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": 
{} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should 
fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] 
[LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc 
[Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { 
+ "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} 
+ }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand 
should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly 
[Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the 
volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (xfs)] 
[Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod 
[LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + 
"Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should 
support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod 
is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: 
Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] 
[Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV 
(default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] 
[Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": 
{ + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume 
mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block 
volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { 
+ "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + 
"Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first 
pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-link] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec 
of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted 
[LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": 
{}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV 
(default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": 
{}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV 
(default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to 
larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume 
mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + 
"Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two 
volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod 
created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs 
created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": 
{} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] 
volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] 
[Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + 
}, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the 
volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] 
subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] 
[Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV 
(block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] 
provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] 
[Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data 
across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should 
access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block 
volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should 
store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": 
{}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing 
directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support 
readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block 
volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + 
{ + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: 
Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": 
{ + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV 
(block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} 
+ }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] 
[Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] 
[Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { 
+ "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext3)] volumes 
should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default 
fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV 
(default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": 
{ + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: 
Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV 
(ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV 
(filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, 
+ "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + 
"Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node 
[Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + 
{ + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem 
volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents 
ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of 
files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, 
+ "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, 
+ "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted 
while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: 
Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, 
+ "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] 
[Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume 
from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} 
+ }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", 
+ "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] 
[Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access 
the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents 
ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: 
Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support 
non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": 
{} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should be 
able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision 
storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] 
[Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node 
[Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV 
(filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same 
node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] 
[Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + 
{ + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial 
fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: 
Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] 
[Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] 
[Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + 
"labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] 
[Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] 
provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned 
PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] 
[Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should 
access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod 
is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Skipped:gce] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] 
[Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Mounted volume expand [Feature:StorageProvider] Should verify mounted devices can be resized [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:StorageProvider": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kube-controller-manager restarts should delete a bound PVC from a clientPod, restart the kube-control-manager, and ensure the kube-controller-manager does not crash [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kubelet restarts Should test that a file written to the mount before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kubelet restarts Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns. [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kubelet restarts Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns. 
[Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Feature:NodeOutOfServiceVolumeDetach] [Disruptive] [LinuxOnly] NonGracefulNodeShutdown [NonGracefulNodeShutdown] pod that uses a persistent volume via gce pd driver should get immediately rescheduled to a different node after non graceful node shutdown [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:NodeOutOfServiceVolumeDetach": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-expansion loopback local block volume should support online expansion on node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Two pods 
mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Skipped:gce] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Skipped:gce] 
[Suite:k8s]", + "labels": { + "Flaky": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Two pods mounting a local 
volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local Local volume that cannot be mounted [Slow] should fail due to non-existent path [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local Local volume that cannot be mounted [Slow] should fail due to wrong node [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local Pod with node different from PV's NodeAffinity should fail scheduling due to different NodeAffinity [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local Pod with node different from PV's NodeAffinity should fail scheduling due to different NodeSelector [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes spread across nodes when pod has anti-affinity [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes on one node when pod has affinity [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes spread across nodes when pod management is parallel and pod has anti-affinity [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes on one node when pod management is parallel and pod has affinity [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local Stress with local volumes [Serial] should be able to process many pods and reuse local volumes [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs should create a non-pre-bound PV and PVC: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PVC and non-pre-bound PV: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
PersistentVolumes NFS with Single PV - PVC pairs create a PVC and a pre-bound PV: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV and a pre-bound PVC: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV: test phase transition timestamp is set and phase is Available [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV and a pre-bound PVC: test phase transition timestamp is set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV and a pre-bound PVC: test phase transition timestamp multiple updates [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 2 PVs and 4 PVCs: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 3 PVs and 3 PVCs: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 4 PVs and 2 PVCs: test write access [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS when invoking the Recycle reclaim policy should test that a PV becomes Available and is clean after the PVC is deleted. 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes CSI Conformance should run through the lifecycle of a PV and a PVC [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes CSI Conformance should apply changes to a pv/pvc status [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes Default StorageClass [LinuxOnly] pods that use multiple volumes should be reschedulable [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PV Protection Verify \"immediate\" deletion of a PV that is not bound to a PVC [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PV Protection Verify that PV bound to a PVC is not removed immediately [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PVC Protection Verify \"immediate\" deletion of a PVC that is not in active use by a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PVC Protection Verify that PVC in active use by a pod is not removed immediately [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PVC Protection Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Persistent Volume Claim and StorageClass Retroactive StorageClass assignment [Serial] [Disruptive] should assign default SC to PVCs that have no SC set [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] StaticPods [Feature:Kind] should run after kubelet stopped with CSI volume mounted [Disruptive] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Kind": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] StorageClasses CSI Conformance should run through the lifecycle of a StorageClass [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Atomic writer volumes should support subpaths with secret pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod with mountPath of existing file [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Atomic writer volumes should support subpaths with downward pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Atomic writer volumes should support subpaths with projected pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Container restart should verify that container can restart successfully after configmaps modified [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] VolumeAttachment Conformance should run through the lifecycle of a VolumeAttachment [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create prometheus metrics for volume provisioning and attach/detach [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create prometheus metrics for volume provisioning errors [Slow] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", 
+ "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create volume metrics with the correct FilesystemMode PVC ref [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create volume metrics with the correct BlockMode PVC ref [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create metrics for total time taken in volume operations in P/V Controller [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create volume metrics in Volume Manager [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create metrics for total number of volumes in A/D Controller [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create prometheus metrics for volume provisioning and attach/detach [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create prometheus metrics for volume provisioning errors [Slow] [Suite:k8s]", + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create volume metrics with the correct FilesystemMode PVC ref [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create volume metrics with the correct BlockMode PVC ref [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create metrics for total time taken in volume operations in P/V Controller [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics 
Ephemeral should create volume metrics in Volume Manager [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create metrics for total number of volumes in A/D Controller [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create none metrics for pvc controller before creating any PV or PVC [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create unbound pv count metrics for pvc controller after creating pv only [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create unbound pvc count metrics for pvc controller after creating pvc only [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create bound pv/pvc count metrics for pvc controller after creating both pv and pvc [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create unbound pvc count metrics for pvc controller with volume attributes class dimension after creating pvc only [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create bound pv/pvc count metrics for pvc controller with volume attributes class dimension after creating both pv and pvc [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create total pv count metrics for with plugin and volume mode labels after creating pv [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] should provision storage with different parameters [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:StorageProvider": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] should provision storage with non-default reclaim policy Retain [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:StorageProvider": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] should test that deleting a claim before the volume is provisioned deletes the volume. [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:StorageProvider": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] deletion should be idempotent [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "labels": { + "Feature:StorageProvider": {}, + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner External should let an external dynamic provisioner create and delete persistent volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner Default should create and delete default persistent volumes [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner Default should be disabled by changing the default annotation [Serial] [Disruptive] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner Default should be disabled by removing the default annotation [Serial] [Disruptive] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning Invalid AWS KMS key should report an error and create no PV [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] VolumeAttributesClass [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should run through the lifecycle of a 
VolumeAttributesClass [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Volumes ConfigMap should be mountable [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should not require VolumeAttach for drivers without attachment [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should require VolumeAttach for drivers with attachment [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should require VolumeAttach for ephemermal volume and drivers with attachment [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should preserve attachment policy when no CSIDriver present [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume attach CSI CSIDriver deployment after pod creation using non-attachable mock driver should bringup pod after deploying CSIDriver attach=false [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock fsgroup as mount option Delegate FSGroup to CSI driver [LinuxOnly] should pass FSGroup to CSI driver if it is set in pod and driver supports VOLUME_MOUNT_GROUP [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock fsgroup as mount option Delegate FSGroup to CSI driver [LinuxOnly] should not pass FSGroup to CSI driver if it is set in pod and driver supports VOLUME_MOUNT_GROUP [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy [LinuxOnly] should modify fsGroup if fsGroupPolicy=default [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy [LinuxOnly] should modify fsGroup if fsGroupPolicy=File [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy [LinuxOnly] should not modify fsGroup if fsGroupPolicy=None [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should update fsGroup if update from None to File [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should update fsGroup if update from None to default [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should not update fsGroup if update from File to None [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should update fsGroup if update from File to default [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should not update fsGroup if update from detault to None [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should update fsGroup if update from detault to File [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock honor pv reclaim policy [Feature:HonorPVReclaimPolicy] [FeatureGate:HonorPVReclaimPolicy] [Beta] CSI honor pv reclaim policy using mock driver Dynamic provisioning should honor pv delete reclaim policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:HonorPVReclaimPolicy": {}, + "FeatureGate:HonorPVReclaimPolicy": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock honor pv reclaim policy [Feature:HonorPVReclaimPolicy] [FeatureGate:HonorPVReclaimPolicy] [Beta] CSI honor pv reclaim policy using mock driver 
Dynamic provisioning should honor pv retain reclaim policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:HonorPVReclaimPolicy": {}, + "FeatureGate:HonorPVReclaimPolicy": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock honor pv reclaim policy [Feature:HonorPVReclaimPolicy] [FeatureGate:HonorPVReclaimPolicy] [Beta] CSI honor pv reclaim policy using mock driver Static provisioning should honor pv delete reclaim policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:HonorPVReclaimPolicy": {}, + "FeatureGate:HonorPVReclaimPolicy": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock honor pv reclaim policy [Feature:HonorPVReclaimPolicy] [FeatureGate:HonorPVReclaimPolicy] [Beta] CSI honor pv reclaim policy using mock driver Static provisioning should honor pv retain reclaim policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:HonorPVReclaimPolicy": {}, + "FeatureGate:HonorPVReclaimPolicy": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should call NodeUnstage after NodeStage success [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should retry NodeStage after NodeStage final error [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should retry NodeStage after NodeStage ephemeral error [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should call NodeUnstage after NodeStage ephemeral error [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should not call NodeUnstage after NodeStage final error [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeUnstage error cases [Slow] should call NodeStage after NodeUnstage success [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeUnstage error cases [Slow] two pods: should call NodeStage after previous NodeUnstage final 
error [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeUnstage error cases [Slow] two pods: should call NodeStage after previous NodeUnstage transient error [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should pass SELinux mount option for RWOP volume and Pod with SELinux context set [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should add SELinux mount option to existing mount options [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not pass SELinux mount option for RWO volume with SELinuxMount disabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Feature:SELinuxMountReadWriteOncePodOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "Feature:SELinuxMountReadWriteOncePodOnly": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should pass SELinux mount option for RWO volume with SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not pass SELinux mount option for Pod without SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not pass SELinux mount option for CSI driver that does not support SELinux mount [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not unstage RWOP volume when starting a second pod with the same SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should unstage RWOP volume when starting a second pod with different SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not unstage RWO volume when starting a second pod with the same SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should unstage RWO volume when starting a second pod with different SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] warning is not bumped on two Pods with the same context on RWO volume [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Feature:SELinuxMountReadWriteOncePodOnly] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "Feature:SELinuxMountReadWriteOncePodOnly": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] warning is bumped on two Pods with a different context on RWO volume [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Feature:SELinuxMountReadWriteOncePodOnly] 
[Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "Feature:SELinuxMountReadWriteOncePodOnly": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped on two Pods with the same context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on two Pods with a different context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on two Pods with a different context on RWX volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on two Pods with a different context on RWOP volume [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume service account token CSIServiceAccountToken token should not be plumbed down when csiServiceAccountTokenEnabled=false [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume service account token CSIServiceAccountToken token should not be plumbed down when CSIDriver is not deployed [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume service account token CSIServiceAccountToken token should be plumbed down when csiServiceAccountTokenEnabled=true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume snapshot CSI Volume Snapshots [Feature:VolumeSnapshotDataSource] volumesnapshotcontent and pvc in Bound state with deletion timestamp set should not get deleted while snapshot finalizer exists [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume snapshot CSI Volume Snapshots secrets [Feature:VolumeSnapshotDataSource] volume snapshot create/delete with secrets [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume snapshot CSI Snapshot Controller metrics [Feature:VolumeSnapshotDataSource] snapshot controller should emit dynamic CreateSnapshot, CreateSnapshotAndReady, and DeleteSnapshot metrics [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume snapshot CSI Snapshot Controller metrics [Feature:VolumeSnapshotDataSource] snapshot controller should emit pre-provisioned CreateSnapshot, CreateSnapshotAndReady, and DeleteSnapshot metrics [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity storage capacity unlimited [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity storage capacity exhausted, immediate binding [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity storage capacity exhausted, late binding, no topology [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity storage capacity exhausted, late binding, with topology [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + 
"sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity unused [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity disabled [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity used, no capacity [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity used, insufficient capacity [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity used, have capacity [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI Volume expansion should expand volume without restarting pod if nodeExpansion=off [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI Volume expansion should expand volume by restarting pod if attach=on, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI Volume expansion should not have staging_path missing in node expand volume pod if attach=on, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI Volume expansion should expand volume by restarting pod if attach=off, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI Volume expansion should not expand volume if resizingOnDriver=off, resizingOnSC=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
CSI Mock volume expansion CSI online volume expansion with secret should expand volume without restarting pod if attach=on, nodeExpansion=on, csiNodeExpandSecret=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI online volume expansion should expand volume without restarting pod if attach=on, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI online volume expansion should expand volume without restarting pod if attach=off, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should record target size in allocated resources [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:RecoverVolumeExpansionFailure": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should allow recovery if controller expansion fails with final error [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:RecoverVolumeExpansionFailure": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] recovery should not be possible in partially expanded volumes [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:RecoverVolumeExpansionFailure": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume limit CSI volume limit information using mock driver should report attach limit when limit is bigger than 0 [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume limit CSI volume limit information using mock driver should report attach limit for generic ephemeral volume when persistent volume is attached [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume limit CSI volume limit information using mock driver should report attach limit for persistent volume when generic ephemeral volume is attached [Slow] [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI workload information using mock driver should be passed when 
podInfoOnMount=true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI workload information using mock driver contain ephemeral=true when using inline volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI workload information using mock driver should not be passed when podInfoOnMount=false [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI workload information using mock driver should not be passed when CSIDriver does not exist [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI PodInfoOnMount Update should not be passed when update from true to false [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI PodInfoOnMount Update should be passed when update from false to true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-storage": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] stateful Upgrade [Feature:StatefulUpgrade] stateful upgrade should maintain a functioning cluster [Disabled:Unimplemented] [Suite:k8s]", + "labels": { + "Feature:StatefulUpgrade": {}, + "sig-apps": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-auth] ServiceAccount admission controller migration [Feature:BoundServiceAccountTokenVolume] master upgrade should maintain a functioning cluster [Disabled:Unimplemented] [Suite:k8s]", + "labels": { + "Feature:BoundServiceAccountTokenVolume": {}, + "sig-auth": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] kube-proxy migration [Feature:KubeProxyDaemonSetMigration] Upgrade kube-proxy from static pods to a DaemonSet should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade] [Disabled:Unimplemented] [Suite:k8s]", + "labels": { + "Feature:KubeProxyDaemonSetMigration": {}, + "Feature:KubeProxyDaemonSetUpgrade": {}, + "sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] kube-proxy migration [Feature:KubeProxyDaemonSetMigration] Downgrade kube-proxy from a DaemonSet to static pods should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade] [Disabled:Unimplemented] [Suite:k8s]", + "labels": { + "Feature:KubeProxyDaemonSetDowngrade": {}, + "Feature:KubeProxyDaemonSetMigration": {}, + 
"sig-network": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Upgrade [Feature:Upgrade] master upgrade should maintain a functioning cluster [Feature:MasterUpgrade] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:MasterUpgrade": {}, + "Feature:Upgrade": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Upgrade [Feature:Upgrade] cluster upgrade should maintain a functioning cluster [Feature:ClusterUpgrade] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:ClusterUpgrade": {}, + "Feature:Upgrade": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Downgrade [Feature:Downgrade] cluster downgrade should maintain a functioning cluster [Feature:ClusterDowngrade] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:ClusterDowngrade": {}, + "Feature:Downgrade": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] GKE node pools [Feature:GKENodePool] should create a cluster with multiple node pools [Feature:GKENodePool] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:GKENodePool": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] HA-master [Feature:HAMaster] survive addition/removal replicas same zone [Serial] [Disruptive] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:HAMaster": {}, + "Serial": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] HA-master [Feature:HAMaster] survive addition/removal replicas different zones [Serial] [Disruptive] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:HAMaster": {}, + "Serial": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] HA-master [Feature:HAMaster] survive addition/removal replicas multizone workers [Serial] [Disruptive] [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:HAMaster": {}, + "Serial": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not be able to proxy to the readonly kubelet port 10255 using proxy subresource [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:KubeletSecurity": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not be able to proxy to cadvisor port 4194 using proxy subresource [Disabled:SpecialConfig] 
[Suite:k8s]", + "labels": { + "Feature:KubeletSecurity": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not have port 10255 open on its all public IP addresses [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:KubeletSecurity": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not have port 4194 open on its all public IP addresses [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:KubeletSecurity": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] [Disruptive] NodeLease NodeLease deletion node lease should be deleted when corresponding node is deleted [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by ordering clean reboot and ensure they function upon restart [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by ordering unclean reboot and ensure they function upon restart [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by triggering kernel panic and ensure they function upon restart [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by switching off the network interface and ensure they function upon switch on [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by dropping all inbound packets for a while and ensure they function afterwards [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by dropping all outbound packets for a 
while and ensure they function afterwards [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Nodes [Disruptive] Resize [Slow] should be able to delete nodes [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Nodes [Disruptive] Resize [Slow] should be able to add nodes [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Restart [Disruptive] [KubeUp] should restart all nodes and ensure all nodes and pods recover [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "KubeUp": {}, + "sig-cloud-provider-gcp": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider] [Feature:CloudProvider] [Disruptive] Nodes should be deleted on API server if it doesn't exist in the cloud provider [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:CloudProvider": {}, + "sig-cloud-provider": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Cpu Resources [Serial] Container limits should not be exceeded after waiting 2 minutes [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Density [Serial] [Slow] create a batch of pods latency/resource should be within limit when create 10 pods with 0s interval [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:GPUDevicePlugin] Device Plugin should be able to create a functioning device plugin for Windows [Disabled:SpecialConfig] [Suite:k8s]", + "labels": { + "Feature:GPUDevicePlugin": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] DNS should support configurable pod DNS servers [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Eviction [Serial] [Slow] [Disruptive] should evict a pod when a node experiences memory pressure [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] GMSA Full [Serial] [Slow] GMSA support works end to end [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] GMSA Full [Serial] [Slow] GMSA support can read and write file to remote SMB folder [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] GMSA Kubelet [Slow] kubelet GMSA support when creating a pod with correct GMSA credential specs passes the credential specs down to the Pod's containers [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should run as a process on the host/node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should support init containers [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers container command path validation [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should support various volume mount types [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers metrics should report count of started and failed to start HostProcess containers [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers container stats validation [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": 
{} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should support querying api-server using in-cluster config [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should run as localgroup accounts [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] Hybrid cluster network for all supported CNIs should have stable networking for Linux and Windows pods [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] Hybrid cluster network for all supported CNIs should provide Internet connection for Linux containers [Feature:Networking-IPv4] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Networking-IPv4": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] Hybrid cluster network for all supported CNIs should provide Internet connection and DNS for Windows containers [Feature:Networking-IPv4] [Feature:Networking-DNS] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Networking-DNS": {}, + "Feature:Networking-IPv4": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHyperVContainers] HyperV containers should start a hyperv isolated container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:WindowsHyperVContainers": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Kubelet-Stats [Serial] Kubelet stats collection for Windows nodes when running 10 pods should return within 10 seconds [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Kubelet-Stats Kubelet stats collection for Windows nodes when windows is booted should return bootid within 10 seconds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Kubelet-Stats Kubelet stats collection for Windows nodes when running 3 pods should return within 10 seconds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + 
"sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Memory Limits [Serial] [Slow] Allocatable node memory should be equal to a calculated allocatable memory value [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Memory Limits [Serial] [Slow] attempt to deploy past allocatable memory limits should fail deployments of pods once there isn't enough memory [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletVersion:1.22] RebootHost containers [Serial] [Disruptive] [Slow] should run as a reboot process on the host/node [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should be able create pods and run containers with a given username [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with unknown usernames at Pod level [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with unknown usernames at Container level [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should override SecurityContext username if set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should ignore Linux Specific SecurityContext if set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with containers running as ContainerAdministrator when runAsNonRoot is true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with containers running as CONTAINERADMINISTRATOR when runAsNonRoot is true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should be able to create pod and run containers [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] Services should be able to create a functioning NodePort service for Windows [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Windows volume mounts check volume mount permissions container should have readOnly permissions on emptyDir [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Windows volume mounts check volume mount permissions container should have readOnly permissions on hostMapPath [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AggregatedDiscovery should support raw aggregated discovery endpoint Accept headers [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AggregatedDiscovery should support raw aggregated discovery request for CRDs [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AggregatedDiscovery should support aggregated discovery interface [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AggregatedDiscovery should support aggregated discovery interface for CRDs [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Aggregator Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + 
"sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] kube-apiserver identity [Feature:APIServerIdentity] kube-apiserver identity should persist after restart [Disruptive] [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "Feature:APIServerIdentity": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should create an applied object if it does not already exist [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should work for subresources [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should remove a field if it is owned but removed in the apply request [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should not remove a field if an owner unsets the field but other managers still have ownership of the field [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should ignore conflict errors if force apply is used [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should work for CRDs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should give up ownership of a field if forced applied by a controller [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for API chunking should return chunks of results for list calls [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for API chunking should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent [Slow] [Conformance] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert from CR v1 to CR v2 [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert a non homogeneous list of CRs [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD with validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD without validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields at the schema root [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields in an embedded object [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of different groups [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group but different versions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group and version but different kinds [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + 
"isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] updates the published spec when one version gets renamed [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] removes definition from spec when one version gets changed to not be served [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] [Flaky] kubectl explain works for CR with the same resource name as built-in object. [Suite:k8s]", + "labels": { + "Flaky": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceFieldSelectors [Privileged:ClusterAdmin] [FeatureGate:CustomResourceFieldSelectors] [Beta] CustomResourceFieldSelectors MUST list and watch custom resources matching the field selector [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CustomResourceFieldSelectors": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST NOT fail to update a resource due to JSONSchema errors on unchanged correlatable fields [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST fail to update a resource due to JSONSchema errors on unchanged uncorrelatable fields [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST fail to update a resource due to JSONSchema errors on changed fields [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST NOT fail to update a resource due to CRD Validation Rule errors on unchanged correlatable fields [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST fail to update a resource due to CRD Validation Rule errors on unchanged uncorrelatable fields [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST fail to update a resource due to CRD Validation Rule errors on changed fields [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST NOT ratchet errors raised by transition rules [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST evaluate a CRD Validation Rule with oldSelf = nil for new values when optionalOldSelf is true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST NOT fail validation for create of a custom resource that satisfies the x-kubernetes-validations rules [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail validation for create of a custom resource that does not satisfy the x-kubernetes-validations rules [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource definition that contains a x-kubernetes-validations rule that refers to a property that do not exist [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules 
[Privileged:ClusterAdmin] MUST fail create of a custom resource definition that contains an x-kubernetes-validations rule that contains a syntax error [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource definition that contains an x-kubernetes-validations rule that exceeds the estimated cost limit [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource that exceeds the runtime cost limit for x-kubernetes-validations rule execution [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail update of a custom resource that does not satisfy a x-kubernetes-validations transition rule [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] CustomResourceDefinition Watch watch on custom resource definition objects [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition creating/deleting custom resource definition objects works [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition listing custom resource definition objects works [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition getting/updating/patching custom resource definition status sub-resource works [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] should include custom resource definition resources in discovery 
documents [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] custom resource defaulting for requests and from storage works [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Discovery should accurately determine present and missing resources [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Discovery Custom resource should have storage version hash [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Discovery should validate PreferredVersion for each APIGroup [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Discovery should locate the groupVersion and a resource within each APIGroup [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Etcd failure [Disruptive] should recover from network partition with master [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Etcd failure [Disruptive] should recover from SIGKILL [Serial] [Suite:k8s]", + "labels": { + "Disruptive": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should detect unknown and duplicate fields of a typed object [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should detect unknown metadata fields of a typed object [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should create/apply a valid CR for CRD with validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + 
"Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should create/apply a CR with unknown fields for CRD with no validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should create/apply an invalid CR with extra properties for CRD with validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should detect unknown metadata fields in both the root and embedded object of a CR [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should detect duplicates in a CR when preserving unknown fields [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API priority and fairness should ensure that requests can be classified by adding FlowSchema and PriorityLevelConfiguration [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (priority) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (fairness) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API priority and fairness should support FlowSchema API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API priority and fairness should support PriorityLevelConfiguration API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should delete pods created by rc 
when not orphaning [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should orphan pods created by rc if delete options say so [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should orphan pods created by rc if deleteOptions.OrphanDependents is nil [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should delete RS created by deployment when not orphaning [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should not be blocked by dependency circle [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should support cascading deletion of custom resources [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should support orphan deletion of custom resources [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-api-machinery] Garbage collector should delete jobs and pods created by cronjob [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Generated clientset should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Generated clientset should create v1 cronJobs, delete cronJobs, watch cronJobs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] health handlers should contain necessary checks [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should ensure that all pods are removed when a namespace is deleted [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should ensure that all services are removed when a namespace is deleted [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should delete fast enough (90 percent of 100 namespaces in 150 seconds) [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Feature:ComprehensiveNamespaceDraining": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should patch a Namespace [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should apply changes to a namespace status [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should apply an update to a Namespace [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should apply a finalizer to a Namespace [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] OpenAPIV3 should round trip OpenAPI V3 for all built-in group versions [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] OpenAPIV3 should publish OpenAPI V3 for CustomResourceDefinition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] OpenAPIV3 should contain OpenAPI V3 for Aggregated APIServer [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/json\" [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/vnd.kubernetes.protobuf\" [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/vnd.kubernetes.protobuf,application/json\" [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/json,application/vnd.kubernetes.protobuf\" [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Server request timeout should return HTTP status code 400 if the user specifies an invalid timeout in the request URL [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Server request timeout the request 
should be served with a default timeout if the specified timeout in the request URL exceeds maximum allowed [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Server request timeout default timeout should be used if the specified timeout in the request URL is 0s [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a service. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a secret. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a pod. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a configMap. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replication controller. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replica set. 
[Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a ResourceClaim [Feature:DynamicResourceAllocation] [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim with a storage class [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a custom resource. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with terminating scopes. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with best effort scope. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should be able to update and delete ResourceQuota. 
[Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should manage the lifecycle of a ResourceQuota [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should apply changes to a resourcequota status [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with best effort scope using scope-selectors. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with terminating scopes through scope selectors. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes. 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn). [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists). [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with cross namespace pod affinity scope using scope-selectors. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] server version should find the server version [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] StorageVersion resources [Feature:StorageVersionAPI] storage version with non-existing id should be GC'ed [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:StorageVersionAPI": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for Table transformation should return pod details [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for Table transformation should return chunks of table results for list calls [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for Table transformation should return generic metadata details across all namespaces for nodes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + 
}, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for Table transformation should return a 406 for a backend which does not implement metadata [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should validate against a Deployment [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should type check validation expressions [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should allow expressions to refer variables. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should type check a CRD [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should support ValidatingAdmissionPolicy API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should support ValidatingAdmissionPolicyBinding API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Watchers should observe add, update, and delete watch notifications on configmaps [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Watchers should be able to start watching from a specific resource version [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Watchers should be able to 
restart watching from the last resource version observed by the previous watch [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Watchers should observe an object deletion if it stops meeting the requirements of the selector [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Watchers should receive events on concurrent watches in same order [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API Streaming (aka. WatchList) [Serial] [Feature:WatchList] should be requested by informers when WatchListClient is enabled [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:WatchList": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API Streaming (aka. WatchList) [Serial] [Feature:WatchList] should be requested by client-go's List method when WatchListClient is enabled [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:WatchList": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API Streaming (aka. 
WatchList) [Serial] [Feature:WatchList] should be requested by dynamic client's List method when WatchListClient is enabled [Disabled:Alpha] [Suite:k8s]", + "labels": { + "Feature:WatchList": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should include webhook resources in discovery documents [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny pod and configmap creation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny attaching pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny custom resource creation, update and deletion [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should unconditionally reject operations on fail closed webhook [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate configmap [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate pod and apply defaults after mutation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom 
resource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should deny crd creation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with different stored version [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with pruning [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should honor timeout [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a validating webhook should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a mutating webhook should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing validating webhooks should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing mutating webhooks should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to create and update validating webhook configurations with match conditions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + 
"sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to create and update mutating webhook configurations with match conditions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should reject validating webhook configurations with invalid match conditions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should reject mutating webhook configurations with invalid match conditions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate everything except 'skip-me' configmaps [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl delete interactive based on user confirmation input [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl exec should be able to execute 1000 times in a container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl logs logs should be able to retrieve and filter logs [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl logs default container logs the second container is the default-container by annotation should log default container if not specified [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl logs all pod logs the Deployment has 2 replicas and each pod has 2 containers should get logs from all pods based on default container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl logs all pod logs the Deployment has 2 replicas and each pod has 2 containers should get logs from 
each pod and each container in Deployment [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Update Demo should create and stop a replication controller [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Guestbook application should create and stop a working application [Conformance] [Slow] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support exec [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support exec using resource/name [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support exec through an HTTP proxy [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support exec through kubectl proxy [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a successful command [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a failing command [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should return command exit codes should support port-forward [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should return command exit codes should handle in-cluster config [Disabled:Broken] 
[Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod Kubectl run running a successful command [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod Kubectl run running a failing command [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod Kubectl run [Slow] running a failing command without --restart=Never [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod Kubectl run [Slow] running a failing command without --restart=Never, but with --rm [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod Kubectl run [Slow] running a failing command with --leave-stdin-open [Suite:k8s]", + "labels": { + "Slow": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support inline execution and attach [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support inline execution and attach with websockets or fallback to spdy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should contain last line of the log [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl api-versions should check if v1 is in available api versions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl get componentstatuses should get componentstatuses [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl prune with applyset should apply and prune objects [Disabled:RebaseInProgress] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl 
client Kubectl apply should apply a new configuration to an existing RC [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl apply should reuse port when apply to an existing SVC [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl apply apply set/view last-applied [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl diff should check if kubectl diff finds a difference for Deployments [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl server-side dry-run should check if kubectl can dry-run update Pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl validation should create/apply a CR with unknown fields for CRD with no validation schema [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl validation should create/apply a valid CR for CRD with validation schema [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl validation should create/apply an invalid/valid CR with arbitrary-extra properties for CRD with partially-specified validation schema [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields in both the root and embedded object of a CR [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields of a typed object [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes control plane services is included in cluster-info [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": 
{} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl cluster-info dump should check if cluster-info dump succeeds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for cronjob [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl expose should create services for rc [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl label should update the label on a resource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl copy should copy a file from a running Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl patch should add annotations for pods in rc [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl version should check is all data is printed [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl run pod should create a pod from an image when restart is Never [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl replace should update a single-container pod's image [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Proxy server should support proxy 
with --port 0 [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Proxy server should support --unix-socket=/path [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl taint [Serial] should update the taint on a node [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl taint [Serial] should remove all the taints with the same key off a node [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "labels": { + "Serial": {}, + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl events should show event when pod is created [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl create quota should create a quota without scopes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl create quota should create a quota with scopes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl create quota should reject quota with invalid scopes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client kubectl wait should ignore not found error with --for=delete [Disabled:Broken] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client kubectl subresource flag should not be used in a bulk GET [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client kubectl subresource flag GET on status subresource of built-in type (node) returns identical info as GET on the built-in type [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a 
client that connects, sends NO DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects NO client request should support a client that connects, sends DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 should support forwarding over websockets [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends NO DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects NO client request should support a client that connects, sends DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on localhost should support forwarding over websockets [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl rollout undo undo should rollback and update deployment env [Suite:openshift/conformance/parallel] [Suite:k8s]", + "labels": { + "sig-cli": {} + }, + "resources": { + "isolation": {} + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + } +] \ No newline at end of file diff --git a/.snyk b/.snyk new file mode 100644 index 0000000000000..dd23598634792 --- /dev/null +++ b/.snyk @@ -0,0 +1,9 @@ +# References: +# https://docs.snyk.io/scan-applications/snyk-code/using-snyk-code-from-the-cli/excluding-directories-and-files-from-the-snyk-code-cli-test +# https://docs.snyk.io/snyk-cli/commands/ignore +exclude: + global: + - 
"**/vendor/**" + - "**/*_test.go" + - "**/testdata/**" + - "**/cluster/**" diff --git a/DOWNSTREAM_OWNERS b/DOWNSTREAM_OWNERS new file mode 100644 index 0000000000000..ad48a46ecdd6d --- /dev/null +++ b/DOWNSTREAM_OWNERS @@ -0,0 +1,32 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +filters: + ".*": + # Downstream reviewers, don't have to match those in OWNERS + reviewers: + - bertinatto + - deads2k + - jerpeter1 + - p0lyn0mial + - soltysh + - tkashem + + # Approvers are limited to the team that manages rebases and pays the price for carries that are introduced + approvers: + - bertinatto + - deads2k + - jerpeter1 + - p0lyn0mial + - soltysh + - tkashem + + "^\\.go.(mod|sum)$": + labels: + - "vendor-update" + "^vendor/.*": + labels: + - "vendor-update" + "^staging/.*": + labels: + - "vendor-update" +component: kube-apiserver diff --git a/README.openshift.md b/README.openshift.md new file mode 100644 index 0000000000000..b04871fc09c0f --- /dev/null +++ b/README.openshift.md @@ -0,0 +1,73 @@ +# OpenShift's fork of k8s.io/kubernetes + +This respository contains core Kubernetes components with OpenShift-specific patches. + +## Cherry-picking an upstream commit into openshift/kubernetes: Why, how, and when. + +`openshift/kubernetes` carries patches on top of each rebase in one of two ways: + +1. *periodic rebases* against an upstream Kubernetes tag. Eventually, +any code you have in upstream Kubernetes will land in Openshift via +this mechanism. + +2. Cherry-picked patches for important *bug fixes*. We really try to +limit feature back-porting entirely. Unless there are exceptional circumstances, your backport should at least be merged in kubernetes master branch. With every carry patch (not included in upstream) you are introducing a maintenance burden for the team managing rebases. + +### For Openshift newcomers: Pick my Kubernetes fix into Openshift vs. wait for the next rebase? + +Assuming you read the bullets above... If your patch is really far behind, for +example, if there have been 5 commits modifying the directory you care about, +cherry picking will be increasingly difficult and you should consider waiting +for the next rebase, which will likely include the commit you care about or at +least decrease the amount of cherry picks you need to do to merge. + +To really know the answer, you need to know *how many commits behind you are in +a particular directory*, often. + +To do this, just use git log, like so (using pkg/scheduler/ as an example). + +``` +MYDIR=pkg/scheduler/algorithm git log --oneline -- + ${MYDIR} | grep UPSTREAM | cut -d' ' -f 4-10 | head -1 +``` + +The commit message printed above will tell you: + +- what the LAST commit in Kubernetes was (which effected +"/pkg/scheduler/algorithm") +- directory, which will give you an intuition about how "hot" the code you are +cherry picking is. If it has changed a lot, recently, then that means you +probably will want to wait for a rebase to land. + +### Cherry-picking an upstream change + +Since `openshift/kubernetes` closely resembles `k8s.io/kubernetes`, +cherry-picking largely involves proposing upstream commits in a PR to our +downstream fork. Other than the usual potential for merge conflicts, the +commit messages for all commits proposed to `openshift/kubernetes` must +reflect the following: + +- `UPSTREAM: :` The prefix for upstream commits to ensure + correct handling during a future rebase. 
correct handling during a future rebase. The person performing the rebase + will know to omit a commit with this prefix if the referenced PR is already + present in the new base history. +- `UPSTREAM: <drop>:` The prefix for downstream commits of code that is + generated (i.e. via `make update`) or that should not be retained by the + next rebase. +- `UPSTREAM: <carry>:` The prefix for downstream commits that maintain + downstream-specific behavior (i.e. to ensure an upstream change is + compatible with OpenShift). Commits with this prefix are usually retained across + rebases. + +## Updating openshift/kubernetes to a new upstream release + +Instructions for rebasing `openshift/kubernetes` are maintained in a [separate +document](REBASE.openshift.md). + +## RPM Packaging + +A specfile is included in this repo which can be used to produce RPMs +including the openshift binary. While the specfile will be kept up to +date with build requirements, the version is not updated. Building the +rpm with the `openshift-hack/build-rpms.sh` helper script will ensure +that the version is set correctly. diff --git a/REBASE.openshift.md b/REBASE.openshift.md new file mode 100644 index 0000000000000..68e185b3342bb --- /dev/null +++ b/REBASE.openshift.md @@ -0,0 +1,536 @@ +# Maintaining openshift/kubernetes + +OpenShift is based on upstream Kubernetes. With every release of Kubernetes that is +intended to be shipped as OCP, it is necessary to incorporate the upstream changes +while ensuring that our downstream customizations are maintained. + +## Rebasing for releases < 4.6 + +The instructions in this document apply to OpenShift releases 4.6 and +above. For previous releases, please see the [rebase +enhancement](https://github.com/openshift/enhancements/blob/master/enhancements/rebase.md). + +## Maintaining this document + +An openshift/kubernetes rebase is a complex process involving many manual and +potentially error-prone steps. If, while performing a rebase, you find areas where +the documented procedure is unclear or missing detail, please update this document +and include the change in the rebase PR. This will ensure that the instructions are +as comprehensive and accurate as possible for the person performing the next +rebase. + +## Rebase Checklists + +The checklists provided below highlight the key responsibilities of +someone performing an openshift/kubernetes rebase. + +In preparation for submitting a PR to the [openshift fork of +kubernetes](https://github.com/openshift/kubernetes), the following +should be true: + +- [ ] The new rebase branch has been created from the upstream tag +- [ ] The new rebase branch includes relevant carries from the target branch +- [ ] Dependencies have been updated +- [ ] Hyperkube dockerfile version has been updated +- [ ] `make update` has been invoked and the results committed +- [ ] `make` executes without error +- [ ] `make verify` executes without error +- [ ] `make test` executes without error +- [ ] The upstream tag is pushed to `openshift/kubernetes` to ensure that + build artifacts are versioned correctly + - Upstream tooling uses the value of the most recent tag (e.g. `v1.25.0`) + in the branch history as the version of the binaries it builds.
+ - Pushing the tag is as easy as +``` +git push git@github.com:openshift/kubernetes.git refs/tags/v1.25.0 +``` + +Details to include in the description of the PR: + +- [ ] A link to the rebase spreadsheet for the benefit of reviewers + +After the rebase PR has merged to `openshift/kubernetes`, vendor the changes +into `openshift/origin` to ensure that the openshift-tests binary reflects +the upstream test changes introduced by the rebase: + +- [ ] Find the SHA of the merge commit after your PR lands in `openshift/kubernetes` +- [ ] Run `hack/update-kube-vendor.sh <kube SHA>` in a clone of the `origin` + repo and commit the results +- [ ] Run `make update` and commit the results +- [ ] Submit as a PR to `origin` + +As a final step, send an email to the aos-devel mailing list announcing the +rebase. Make sure to include: + +- [ ] The new version of upstream Kubernetes that OpenShift is now based on +- [ ] Link(s) to upstream changelog(s) detailing what has changed since the last rebase landed +- [ ] A reminder to component maintainers to bump their dependencies +- [ ] Relevant details of the challenges involved in landing the rebase that + could benefit from a wider audience. + +## Getting started + +Before incorporating upstream changes you may want to: + +- Read this document +- Get familiar with tig (text-mode interface for git) +- Find the best tool for resolving merge conflicts +- Use the diff3 conflict resolution strategy + (https://blog.nilbus.com/take-the-pain-out-of-git-conflict-resolution-use-diff3/) + +## Send email announcing you're starting work + +To better spread the information, send the following email: + +``` +Title: k8s bump is starting... + +I'm starting the process of updating our fork to bring in +the latest available version of kubernetes. This means that +every PR landing in openshift/kubernetes should go through +extra scrutiny, and only 2 exceptions allow merging PRs in the +meantime: +1. High priority backports which require landing master first +to start the backport process. +2. Critical PRs unblocking the org. +In both cases make sure to reach out to me for final approval. + +There is no ETA yet, but feel free to reach out to me with +any questions. +``` + +## Preparing the local repo clone + +Clone from a personal fork of kubernetes via a pushable (ssh) url: + +``` +git clone git@github.com:<your github user>/kubernetes +``` + +Add a remote for upstream and fetch its branches: + +``` +git remote add --fetch upstream https://github.com/kubernetes/kubernetes +``` + +Add a remote for the openshift fork and fetch its branches: + +``` +git remote add --fetch openshift https://github.com/openshift/kubernetes +``` + +## Creating a new local branch for the new rebase + +- Branch the target `k8s.io/kubernetes` release tag (e.g. `v1.25.0`) to a new + local branch + +``` +git checkout -b rebase-1.25.0 v1.25.0 +``` + +- Merge the `openshift/master` branch into the `rebase-1.25.0` branch with merge + strategy `ours`. It discards all changes from the other branch (`openshift/master`) + and creates a merge commit. This leaves the content of your branch unchanged, + and when you next merge with the other branch, Git will only consider changes made + from this point forward. (Do not confuse this with the `ours` conflict resolution + strategy of the `recursive` merge strategy, the `-X` option.) + +``` +git merge -s ours openshift/master +``` + +## Creating a spreadsheet of carry commits from the previous release + +Given the upstream tag (e.g. 
`v1.24.2`) of the most recent rebase and the name +of the branch that is targeted for rebase (e.g. `openshift/master`), generate a tsv file +containing the set of carry commits that need to be considered for picking: + +``` +echo -e 'Comment Sha\tAction\tClean\tSummary\tCommit link\tPR link' > ~/Documents/v1.24.2.tsv +``` +(note the `-e`, so that bash's builtin echo emits real tab characters) +``` +git log $( git merge-base openshift/master v1.24.2 )..openshift/master --ancestry-path --reverse --no-merges --pretty='tformat:%x09%h%x09%x09%x09%s%x09https://github.com/openshift/kubernetes/commit/%h?w=1' | grep -E $'\t''UPSTREAM: .*'$'\t' | sed -E 's~UPSTREAM: ([0-9]+)(:.*)~UPSTREAM: \1\2\thttps://github.com/kubernetes/kubernetes/pull/\1~' >> ~/Documents/v1.24.2.tsv +``` + +This tsv file can be imported into a google sheets spreadsheet to track the +progress of picking commits to the new rebase branch. The spreadsheet can also +be a way of communicating with rebase reviewers. For an example of this +communication, please see [the spreadsheet used for the 1.24 +rebase](https://docs.google.com/spreadsheets/d/10KYptJkDB1z8_RYCQVBYDjdTlRfyoXILMa0Fg8tnNlY/edit). + +## Picking commits from the previous rebase branch to the new branch + +Go through the spreadsheet and for every commit set one of the appropriate actions: + - `p`, to pick the commit + - `s`, to squash it (add a comment with the sha of the target) + - `d`, to drop the commit (if it is not obvious, comment why) + +Set up conditional formatting in the google sheet to color these lines appropriately. + +Commits carried on rebase branches have commit messages prefixed as follows: + +- `UPSTREAM: <carry>:` + - A persistent carry that should probably be picked for the subsequent rebase branch. + - In general, these commits are used to modify behavior for consistency or + compatibility with openshift. +- `UPSTREAM: <drop>:` + - A carry that should probably not be picked for the subsequent rebase branch. + - In general, these commits are used to maintain the codebase in ways that are + branch-specific, like the update of generated files or dependencies. +- `UPSTREAM: 77870:` + - The number identifies a PR in upstream kubernetes + (i.e. `https://github.com/kubernetes/kubernetes/pull/<pr number>`) + - A commit with this message should only be picked into the subsequent rebase branch + if the commits of the referenced PR are not included in the upstream branch. + - To check if a given commit is included in the upstream branch, open the referenced + upstream PR and check any of its commits for the release tag (e.g. `v1.25.0`) + targeted by the new rebase branch. + +With these guidelines in mind, pick the appropriate commits from the previous rebase +branch into the new rebase branch. Create a new filter view in the spreadsheet to +get a view where `Action==p || Action==s`, and copy-paste the shas into the `git cherry-pick` +command. Use `tr '\n' ' ' <<< "<pasted shas>"` to get a space-separated list +from the copy&paste (a short sketch of this flow follows below). + +Where it makes sense to do so, squash carried changes that are tightly coupled to +simplify future rebases. If the commit message of a carry does not conform to +expectations, feel free to revise and note the change in the spreadsheet row for the +commit. + +If you pick all the pick+squash commits first and push them for review, it is easier for you +and your reviewers to check the code changes; squash them at the end.
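+A minimal sketch of that pick flow (the shas below are hypothetical placeholders for the rows copied from the filter view): + +``` +# Collapse the copied spreadsheet column into a space-separated list of shas... +SHAS=$(tr '\n' ' ' <<< "0a1b2c3 +4d5e6f7 +8091a2b") +# ...and pick them in spreadsheet order. +git cherry-pick ${SHAS} +```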
+ +When filling in the Clean column in the spreadsheet, use the following +numbers to express the complexity of the pick: +- 0 - clean +- 1 - format fixups +- 2 - code fixups +- 3 - logic changes + +Explicit commit rules: +- Anything touching `openshift-hack/`, OpenShift-specific READMEs or similar files + should be squashed to 1 commit named "UPSTREAM: <carry>: Add OpenShift specific files" +- Updating generated files coming from kubernetes should be a `<drop>` commit +- Generated changes should never be mixed with non-generated changes. If a carry is + ever seen to contain generated changes, those changes should be dropped. + +## Update the hyperkube image version to the release tag + +The [hyperkube image](openshift-hack/images/hyperkube/Dockerfile.rhel) +hard-codes the Kubernetes version in an image label of the form +`io.openshift.build.versions="kubernetes=1.x.y"`. It's necessary to manually +set this label to the new release tag. Prefix the commit summary with +`UPSTREAM: <carry>: (squash)` and squash it before merging the rebase PR. + +This value, among other things, is used by ART to inject the appropriate version of +Kubernetes during the build process, so it always has to reflect the correct level of +Kubernetes. + +## Update base-os and test images + +To be able to use the latest kubelet from a pull request, the openshift/release +job layers the built RPM [on top of the `rhel-coreos` image](https://github.com/openshift/release/blob/78568fbde1ee9a15bc6ab08c7c49ae3539d3e302/ci-operator/config/openshift/kubernetes/openshift-kubernetes-master.yaml#L102-L113). +Make sure that the `FROM` uses the appropriate OCP version which corresponds +with what we have in the [hyperkube image](openshift-hack/images/hyperkube/Dockerfile.rhel). + +Similarly, update `FROM` in the [test image](openshift-hack/images/tests/Dockerfile.rhel) +to match the one from the [hyperkube image](openshift-hack/images/hyperkube/Dockerfile.rhel). + +## Updating dependencies + +Once the commits are all picked from the previous rebase branch, and your PR +is mostly ready, each of the following repositories needs to be updated to depend +on the upstream tag targeted by the rebase: + +- https://github.com/openshift/api +- https://github.com/openshift/apiserver-library-go +- https://github.com/openshift/client-go +- https://github.com/openshift/library-go + +Often these repositories are updated in parallel by other team members, so make +sure to ask around before starting the work of bumping their dependencies. + +Once the above repos have been updated to depend on the target release, +it will be necessary to update `go.mod` to point to the appropriate revision +of these repos by running `hack/pin-dependency.sh` for each of them and then running +`hack/update-vendor.sh` (as per the [upstream documentation](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/vendor.md#adding-or-updating-a-dependency)). + +Make sure to commit the result of a vendoring update with `UPSTREAM: <drop>: bump(*)`. +If you have already bumped the dependencies to get the repo to compile, +don't forget to squash the commits before merging the PR. + +### Updating dependencies for pending bumps + +The upstream `hack/pin-dependency.sh` script only supports setting the dependency +for the original repository. To pin to a fork branch that has not yet been +merged (i.e. 
to test a rebase ahead of shared library bumps having merged), the +following `go mod` invocations are suggested (the `<...>` parts are placeholders): + +``` +go mod edit -replace github.com/openshift/<library>=github.com/<user>/<library>@<sha> +go mod tidy && go mod vendor +``` + +Alternatively, you can edit the `go.mod` file manually with your favourite editor and use search&replace. + +## Review test annotation rules + +The names of upstream e2e tests are annotated according to a set of +[declarative rules](openshift-hack/e2e/annotate/rules.go). These annotations +are used to group tests into suites and to skip tests that are known to be +incompatible with some or all configurations of OpenShift. + +When performing a rebase, it is important to review the rules to +ensure they are still relevant: + +- [ ] Ensure that `[Disabled:Alpha]` rules are appropriate for the current kube + level. Alpha features that are not enabled by default should be targeted + by this annotation to ensure that tests of those features are skipped. +- [ ] Add new skips (along with a bz to track resolution) where e2e tests fail + consistently. + +Test failures representing major issues affecting cluster capability will +generally need to be addressed before merge of the rebase PR, but minor issues +(e.g. tests that fail to execute correctly but don't appear to reflect a +regression in behavior) can often be skipped and addressed post-merge. + +## Updating generated files + +- Update generated files by running `make update` + - This step depends on etcd being installed in the path, which can be + accomplished by running `hack/install-etcd.sh`. + - Alternatively, run it in the same container CI uses for the build root, which + already has etcd at the correct version: +``` +podman run -it --rm -v $( pwd ):/go/k8s.io/kubernetes:Z --workdir=/go/k8s.io/kubernetes registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.20-openshift-4.15 make update OS_RUN_WITHOUT_DOCKER=yes FORCE_HOST_GO=1 +``` +- Commit the resulting changes as `UPSTREAM: <drop>: make update`. + +## Building and testing + +- Build the code with `make` +- Test the code with `make test` + - Where test failures are encountered and can't be trivially resolved, the + spreadsheet can be used to track those failures to their resolution. The + example spreadsheet should have a sheet that demonstrates this tracking. + - Where a test failure proves challenging to fix without specialized knowledge, + make sure to coordinate with the team(s) responsible for the area(s) + exhibiting the failure. If in doubt, ask for help! +- Verify the code with `make verify` + +## Reacting to new commits + +Inevitably, a rebase will take long enough that new commits will end up being +merged to the targeted openshift/kubernetes branch after the rebase is +underway. The following strategy is suggested to minimize the cost of incorporating +these new commits (a command-level sketch follows the list): + +- rename the existing rebase branch (e.g. 1.25.0-beta.2 -> 1.25.0-beta.2-old) +- create a new rebase branch from HEAD of master +- merge the target upstream tag (e.g. 1.25.0-beta.2) with strategy ours +- pick all carries from the renamed rebase branch (e.g. 1.25.0-beta.2-old) +- pick new carries from the openshift/kubernetes target branch +- add details of the new carries to the spreadsheet +- update generated files + +With good tooling, the cost of this procedure should be ~10 minutes at +most. Re-picking carries should not result in conflicts since the base of the +rebase branch will be the same as before. The only potential sources of conflict +will be the newly added commits.
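+A hypothetical sketch of the first three steps (branch and tag names are illustrative): + +``` +# Park the in-flight rebase branch under a new name +git branch -m rebase-1.25.0-beta.2 rebase-1.25.0-beta.2-old +# Recreate the rebase branch from the current master HEAD +git checkout -b rebase-1.25.0-beta.2 openshift/master +# Merge the target upstream tag with strategy ours +git merge -s ours v1.25.0-beta.2 +``` + +Re-picking the carries then proceeds exactly as in the picking section above.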
+ +## Ensuring the stability of the release + +To ensure we don't regress the product by introducing a new level of Kubernetes, +it is required to create a new sheet in the following spreadsheet and pass all +the variants: https://docs.google.com/spreadsheets/d/1PBk3eqYaPbvY982k_a0W7EGx7CBCHTmKrN6FyNSTDeA/edit#gid=0 + +NOTE: Double check with the TRT team that the current variants in that spreadsheet +are up-to-date. + +## Send email announcing upcoming merge + +A second email should be sent roughly 3 days before merging the bump: + +``` +Title: k8s bump landing... + +<PR link> is bumping k8s to version <version>. +The following repositories have been already bumped as well: + +<list of bumped repositories> + +Followup work has been assigned to appropriate teams +through bugzillas linked in the code. Please treat +them as the highest priority after landing the bump. + +Finally, this means we are blocking ALL PRs to our +kubernetes fork. +``` + +After sending the email, block the merge queue (see below). + +## Blocking the merge queue + +Close to merging a rebase it is good practice to block any merges to the openshift/kubernetes +fork. To do that follow these steps: + +1. Open a new issue in openshift/kubernetes +2. Use `Master Branch Frozen For Kubernetes Merging | branch:master` as the issue title +3. Add the `tide/merge-blocker` label to the issue (you might need a group lead for this) +4. All PRs (including the rebase) are now forbidden from merging to the master branch +5. Before landing the rebase PR, close this issue + +## Send email announcing work done + +The last email should be sent after merging the bump, as a +reply to the previous one: + +``` +<PR link> just merged. +It'll take some time to get a newer kubelet, but in the meantime we'll +continue to monitor CI. I encourage everyone to hold off from +merging any major changes to our kubernetes fork to provide clear CI +signal for the next 2-3 days. + +The following bugs were opened during the process, please treat +them as the highest priority and release blockers for your team: + +<list of bugs> +``` + +## Followup work + +1. Update cluster-kube-apiserver-operator `pre-release-lifecycle` alert's +`removed_release` version similarly to https://github.com/openshift/cluster-kube-apiserver-operator/pull/1382. + +## Updating with `git merge` + +*This is the preferred way to update to patch releases of kubernetes* + +After the initial bump as described above it is possible to update +to a newer released version using `git merge`. To do that follow these steps: + + +1. Fetch the latest upstream changes: +``` +git fetch upstream +``` + where `upstream` points at https://github.com/kubernetes/kubernetes/, and check + the incoming changes: +``` +git log v1.25.0..v1.25.2 --ancestry-path --reverse --no-merges +``` +2. (optional) Revert any commits that were merged into kubernetes between the previous + update and the current one. + +3. Fetch the latest state of the openshift fork, check out the appropriate branch and + create a new branch for the bump: +``` +git fetch openshift +git checkout openshift/release-4.12 +git checkout -b bump-1.25.2 +``` + where `openshift` points at https://github.com/openshift/kubernetes/. + +4. Merge the changes from the appropriate [released version](https://kubernetes.io/releases/patch-releases/#detailed-release-history-for-active-branches): +``` +git merge v1.25.2 +``` + Most likely you'll encounter conflicts, mostly around go.sum and go.mod + files coming from newer versions; at this point, leave the conflicts + as they are and continue the merge: 
+``` +git add --all +git merge --continue +``` + This should create a commit titled `Merge tag 'v1.25.2' into bump-1.25.2`. + +5. Now return to the list of conflicts from the previous step and fix all the files, + picking the appropriate changes, in most cases the newer version. + When done, commit all of them as another commit: +``` +git add --all +git commit -m "UPSTREAM: <drop>: manually resolve conflicts" +``` + This ensures the person reviewing the bump can easily review all the conflicts + and their resolution. + +6. (optional) Update openshift dependencies and run `go mod tidy` to have the + branch names resolved to a proper go mod version. Remember to use the released + versions matching the branch you're modifying. + This is usually required ONLY if you know there have been changes in one of + the libraries that need to be applied to our fork, which happens rarely. + Also, usually this is done by the team introducing the changes in the libraries. + +7. Run `/bin/bash` in a container using the command and image described in the [Updating generated files](#updating-generated-files) + section: +``` +podman run -it --rm -v $( pwd ):/go/k8s.io/kubernetes:Z --workdir=/go/k8s.io/kubernetes registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.20-openshift-4.15 /bin/bash +``` + In the container run: +``` +export OS_RUN_WITHOUT_DOCKER=yes +export FORCE_HOST_GO=1 +hack/update-vendor.sh +make update +``` + +NOTE: Make sure to use the correct version of the image (both openshift and golang +versions must be appropriate); as a reference, check the `openshift-hack/images/hyperkube/Dockerfile.rhel` +file. + +NOTE: You might encounter problems when running the above; make sure to check the [Potential problems](#potential-problems) +section below. + + +8. Update the kubernetes version in `openshift-hack/images/hyperkube/Dockerfile.rhel` + and commit all of that as: +``` +git commit -m "UPSTREAM: <drop>: hack/update-vendor.sh, make update and update image" +``` + +9. Congratulations, you can open a PR with the updated k8s patch version! + +### Potential problems + +While running `make update` in step 7 above, you might encounter one of the following problems: + +``` +go: inconsistent vendoring in /go/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/vendor/k8s.io/code-generator: +``` +To solve it, edit `staging/src/k8s.io/code-generator/go.mod`, removing this line: `k8s.io/code-generator => ../code-generator`. +Try re-running `make update`; if the problem re-appears, change directory to `staging/src/k8s.io/code-generator` +and run `go mod tidy` and `go mod vendor`. + +NOTE: Make sure to bring back this line: `k8s.io/code-generator => ../code-generator` in `staging/src/k8s.io/code-generator/go.mod` +after you've run `make update`, otherwise the `verify` step will fail during submission. + +``` +etcd version 3.5.6 or greater required +``` +Grab a newer version of etcd from https://github.com/etcd-io/etcd/releases/ and place +it in `/usr/local/bin/etcd`. + +## Updating with `rebase.sh` (experimental) + +The above steps are available as a script that will merge and rebase along the happy path (without automatic conflict +resolution) and at the end will create a PR for you. + +Here are the steps: +1. Create a new BugZilla with the respective OpenShift version to rebase (Target Release stays ---), + Prio&Severity to High, with a proper description of the change logs. + See [BZ2021468](https://bugzilla.redhat.com/show_bug.cgi?id=2021468) as an example. +2. 
It's best to start off with a fresh fork of [openshift/kubernetes](https://github.com/openshift/kubernetes/). Stay on the master branch. +3. This script requires `jq`, `git`, `podman` and `bash`; `gh` is optional. +4. In the root dir of that fork run: +``` +openshift-hack/rebase.sh --k8s-tag=v1.25.2 --openshift-release=release-4.12 --bugzilla-id=2003027 +``` + +where `k8s-tag` is the [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes/) release tag, the `openshift-release` +is the OpenShift release branch in [openshift/kubernetes](https://github.com/openshift/kubernetes/) and the `bugzilla-id` is the +BugZilla ID created in step (1). + +5. In case of conflicts, it will ask you to step into another shell to resolve those. The script will continue by committing the resolution with a `UPSTREAM: <drop>:` prefix. +6. At the end, there will be a "rebase-$VERSION" branch pushed to your fork. +7. If you have `gh` installed and are logged in, it will attempt to create a PR for you by opening a web browser. diff --git a/build/pause/Dockerfile.Rhel b/build/pause/Dockerfile.Rhel new file mode 100644 index 0000000000000..5dc852525b06d --- /dev/null +++ b/build/pause/Dockerfile.Rhel @@ -0,0 +1,12 @@ +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder +WORKDIR /go/src/github.com/openshift/kubernetes/build/pause +COPY . . +RUN mkdir -p bin && \ + gcc -Os -Wall -Werror -o bin/pause ./linux/pause.c + +FROM registry.ci.openshift.org/ocp/4.19:base-rhel9 +COPY --from=builder /go/src/github.com/openshift/kubernetes/build/pause/bin/pause /usr/bin/pod +LABEL io.k8s.display-name="OpenShift Pod" \ + io.k8s.description="This is a component of OpenShift and contains the binary that holds the pod namespaces." \ + io.openshift.tags="openshift" +ENTRYPOINT [ "/usr/bin/pod" ] diff --git a/build/run.sh b/build/run.sh index 3ecc2dacb7789..c0eb0b83270b0 100755 --- a/build/run.sh +++ b/build/run.sh @@ -25,6 +25,12 @@ set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. source "$KUBE_ROOT/build/common.sh" +# Allow running without docker (e.g. 
in openshift ci) +if [[ "${OS_RUN_WITHOUT_DOCKER:-}" ]]; then + "${@}" + exit 0 +fi + KUBE_RUN_COPY_OUTPUT="${KUBE_RUN_COPY_OUTPUT:-y}" kube::build::verify_prereqs diff --git a/cmd/kube-apiserver/.import-restrictions b/cmd/kube-apiserver/.import-restrictions index 32b74c8868fc2..38c8633e00b7a 100644 --- a/cmd/kube-apiserver/.import-restrictions +++ b/cmd/kube-apiserver/.import-restrictions @@ -2,6 +2,7 @@ rules: - selectorRegexp: k8s[.]io/kubernetes allowedPrefixes: - k8s.io/kubernetes/cmd/kube-apiserver + - k8s.io/kubernetes/openshift-kube-apiserver - k8s.io/kubernetes/pkg - k8s.io/kubernetes/plugin - k8s.io/kubernetes/test/utils diff --git a/cmd/watch-termination/main.go b/cmd/watch-termination/main.go new file mode 100644 index 0000000000000..aa3aa8800854b --- /dev/null +++ b/cmd/watch-termination/main.go @@ -0,0 +1,366 @@ +package main + +import ( + "context" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "os/signal" + "strings" + "sync" + "syscall" + "time" + + "gopkg.in/natefinch/lumberjack.v2" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" +) + +func main() { + os.Exit(run()) +} + +func run() int { + terminationLog := flag.String("termination-log-file", "", "Write logs after SIGTERM to this file (in addition to stderr)") + terminationLock := flag.String("termination-touch-file", "", "Touch this file on SIGTERM and delete on termination") + processOverlapDetectionFile := flag.String("process-overlap-detection-file", "", "This file is present when the kube-apiserver initialization timed out while waiting for kubelet to terminate old process") + kubeconfigPath := flag.String("kubeconfig", "", "Optional kubeconfig used to create events") + gracefulTerminatioPeriod := flag.Duration("graceful-termination-duration", 105*time.Second, "The duration of the graceful termination period, e.g. 105s") + + klog.InitFlags(nil) + flag.Set("v", "9") + + // never log to stderr, only through our termination log writer (which sends it also to stderr) + flag.Set("logtostderr", "false") + flag.Set("stderrthreshold", "99") + + flag.Parse() + args := flag.CommandLine.Args() + + if len(args) == 0 { + fmt.Println("Missing command line") + return 1 + } + + // use special tee-like writer when termination log is set + termCh := make(chan struct{}) + var stderr io.Writer = os.Stderr + var terminationLogger *terminationFileWriter + if len(*terminationLog) > 0 { + terminationLogger = &terminationFileWriter{ + Writer: os.Stderr, + fn: *terminationLog, + startFileLoggingCh: termCh, + } + stderr = terminationLogger + + // do the klog file writer dance: klog writes to all outputs of lower + // severity. No idea why. So we discard for anything other than info. + // Otherwise, we would see errors multiple times. 
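+ // Hence: discard klog's default output entirely and wire only the INFO severity to our tee-like writer, so each message is emitted exactly once.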
+ klog.SetOutput(ioutil.Discard) + klog.SetOutputBySeverity("INFO", stderr) + } + + var client kubernetes.Interface + if len(*kubeconfigPath) > 0 { + loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: *kubeconfigPath}, &clientcmd.ConfigOverrides{}) + if cfg, err := loader.ClientConfig(); err != nil { + klog.Errorf("failed to load kubeconfig %q: %v", *kubeconfigPath, err) + return 1 + } else { + client = kubernetes.NewForConfigOrDie(cfg) + } + } + + if processOverlapDetectionFile != nil && len(*processOverlapDetectionFile) > 0 { + var deleteDetectionFileOnce sync.Once + + if _, err := os.Stat(*processOverlapDetectionFile); err != nil && !os.IsNotExist(err) { + klog.Errorf("failed to read process overlap detection file %q: %v", *processOverlapDetectionFile, err) + return 1 + } else if err == nil { + ref, err := eventReference() + if err != nil { + klog.Errorf("failed to get event target: %v", err) + return 1 + } + go func() { + defer deleteDetectionFileOnce.Do(func() { + if err := os.Remove(*processOverlapDetectionFile); err != nil { + klog.Warningf("Failed to remove process overlap termination file %q: %v", *processOverlapDetectionFile, err) + } + }) + if err := retry.OnError(retry.DefaultBackoff, func(err error) bool { + select { + case <-termCh: + // stop retry on termination + return false + default: + } + // every error is retriable + return true + }, func() error { + return eventf(client.CoreV1().Events(ref.Namespace), *ref, corev1.EventTypeWarning, "TerminationProcessOverlapDetected", "The kube-apiserver initialization timed out while waiting for kubelet to terminate old process") + }); err != nil { + klog.Warning(err) + } + }() + } + } + + // touch file early. If the file is not removed on termination, we are not + // terminating cleanly via SIGTERM. 
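+ // So if the lock file still exists at startup, the previous instance died without cleanup; the block below reports that as a NonGracefulTermination event before taking the lock again.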
+ if len(*terminationLock) > 0 { + ref, err := eventReference() + if err != nil { + klog.Errorf("failed to get event target: %v", err) + return 1 + } + + if st, err := os.Stat(*terminationLock); err == nil { + podName := "unknown" + if v := os.Getenv("POD_NAME"); len(v) > 0 { + podName = v // pod name is always the same for static pods + } + msg := fmt.Sprintf("Previous pod %s started at %s did not terminate gracefully", podName, st.ModTime().String()) + + klog.Warning(msg) + _, _ = terminationLogger.WriteToTerminationLog([]byte(msg + "\n")) + + go retry.OnError(retry.DefaultBackoff, func(err error) bool { + select { + case <-termCh: + // stop retry on termination + return false + default: + } + // every error is retriable + return true + }, func() error { + return eventf(client.CoreV1().Events(ref.Namespace), *ref, corev1.EventTypeWarning, "NonGracefulTermination", msg) + }) + + klog.Infof("Deleting old termination lock file %q", *terminationLock) + if err := os.Remove(*terminationLock); err != nil { + klog.Errorf("Old termination lock file deletion failed: %v", err) + } + } + + // separation to see where the new one is starting + _, _ = terminationLogger.WriteToTerminationLog([]byte("---\n")) + + klog.Infof("Touching termination lock file %q", *terminationLock) + if err := touch(*terminationLock); err != nil { + klog.Infof("Error touching %s: %v", *terminationLock, err) + // keep going + } + + var deleteLockOnce sync.Once + + if *gracefulTerminatioPeriod > 2*time.Second { + go func() { + <-termCh + <-time.After(*gracefulTerminatioPeriod - 2*time.Second) + + deleteLockOnce.Do(func() { + klog.Infof("Graceful termination time nearly passed and kube-apiserver has still not terminated. Deleting termination lock file %q to avoid a false positive.", *terminationLock) + if err := os.Remove(*terminationLock); err != nil { + klog.Errorf("Termination lock file deletion failed: %v", err) + } + + if err := eventf(client.CoreV1().Events(ref.Namespace), *ref, corev1.EventTypeWarning, "GracefulTerminationTimeout", "kube-apiserver did not terminate within %s", *gracefulTerminatioPeriod); err != nil { + klog.Error(err) + } + }) + }() + } + + defer deleteLockOnce.Do(func() { + klog.Infof("Deleting termination lock file %q", *terminationLock) + if err := os.Remove(*terminationLock); err != nil { + klog.Errorf("Termination lock file deletion failed: %v", err) + } + }) + } + + cmd := exec.Command(args[0], args[1:]...) + cmd.Stdout = os.Stdout + cmd.Stderr = stderr + + // forward SIGTERM and SIGINT to child + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for s := range sigCh { + select { + case <-termCh: + default: + close(termCh) + } + + klog.Infof("Received signal %s. Forwarding to sub-process %q.", s, args[0]) + + cmd.Process.Signal(s) + } + }() + + klog.Infof("Launching sub-process %q", cmd) + rc := 0 + if err := cmd.Run(); err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + rc = exitError.ExitCode() + } else { + klog.Infof("Failed to launch %s: %v", args[0], err) + return 255 + } + } + + // remove signal handling + signal.Stop(sigCh) + close(sigCh) + wg.Wait() + + klog.Infof("Termination finished with exit code %d", rc) + return rc +} + +// terminationFileWriter forwards everything to the embedded writer. When +// startFileLoggingCh is closed, everything is appended to the given file name +// in addition. 
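+// A nil *terminationFileWriter is safe to use: WriteToTerminationLog on a nil receiver is a no-op that reports success.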
+type terminationFileWriter struct { + io.Writer + fn string + startFileLoggingCh <-chan struct{} + + logger io.Writer +} + +func (w *terminationFileWriter) WriteToTerminationLog(bs []byte) (int, error) { + if w == nil { + return len(bs), nil + } + + if w.logger == nil { + l := &lumberjack.Logger{ + Filename: w.fn, + MaxSize: 100, // megabytes + MaxBackups: 3, + MaxAge: 28, // days + Compress: false, + } + w.logger = l + fmt.Fprintf(os.Stderr, "Copying termination logs to %q\n", w.fn) + } + if n, err := w.logger.Write(bs); err != nil { + return n, err + } else if n != len(bs) { + return n, io.ErrShortWrite + } + return len(bs), nil +} + +func (w *terminationFileWriter) Write(bs []byte) (int, error) { + // temporary hack to avoid logging sensitive tokens. + // TODO: drop when we have moved to a non-sensitive storage format + if strings.Contains(string(bs), "URI=\"/apis/oauth.openshift.io/v1/oauthaccesstokens/") || strings.Contains(string(bs), "URI=\"/apis/oauth.openshift.io/v1/oauthauthorizetokens/") { + return len(bs), nil + } + + select { + case <-w.startFileLoggingCh: + if n, err := w.WriteToTerminationLog(bs); err != nil { + return n, err + } + default: + } + + return w.Writer.Write(bs) +} + +func touch(fn string) error { + _, err := os.Stat(fn) + if os.IsNotExist(err) { + file, err := os.Create(fn) + if err != nil { + return err + } + defer file.Close() + return nil + } + + currentTime := time.Now().Local() + return os.Chtimes(fn, currentTime, currentTime) +} + +func eventf(client corev1client.EventInterface, ref corev1.ObjectReference, eventType, reason, messageFmt string, args ...interface{}) error { + t := metav1.Time{Time: time.Now()} + host, _ := os.Hostname() // explicitly ignore the error; an empty host is fine + + e := &corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), + Namespace: ref.Namespace, + }, + InvolvedObject: ref, + Reason: reason, + Message: fmt.Sprintf(messageFmt, args...), + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventType, + Source: corev1.EventSource{Component: "apiserver", Host: host}, + } + + _, err := client.Create(context.TODO(), e, metav1.CreateOptions{}) + + if err == nil { + klog.V(2).Infof("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) + } + + return err +} + +func eventReference() (*corev1.ObjectReference, error) { + ns := os.Getenv("POD_NAMESPACE") + pod := os.Getenv("POD_NAME") + if len(ns) == 0 && len(pod) > 0 { + serviceAccountNamespaceFile := "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + if _, err := os.Stat(serviceAccountNamespaceFile); err == nil { + bs, err := ioutil.ReadFile(serviceAccountNamespaceFile) + if err != nil { + return nil, err + } + ns = string(bs) + } + } + if len(ns) == 0 { + pod = "" + ns = "kube-system" + } + if len(pod) == 0 { + return &corev1.ObjectReference{ + Kind: "Namespace", + Name: ns, + APIVersion: "v1", + }, nil + } + + return &corev1.ObjectReference{ + Kind: "Pod", + Namespace: ns, + Name: pod, + APIVersion: "v1", + }, nil +} diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index 6b673a129259d..ffc7c24e5bc17 100755 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -78,6 +78,9 @@ kube::golang::server_targets() { staging/src/k8s.io/kube-aggregator staging/src/k8s.io/apiextensions-apiserver cluster/gce/gci/mounter + cmd/watch-termination + openshift-hack/cmd/k8s-tests + openshift-hack/cmd/k8s-tests-ext ) echo "${targets[@]}" } @@ -316,20 +319,7 @@ readonly KUBE_ALL_TARGETS=( ) readonly
KUBE_ALL_BINARIES=("${KUBE_ALL_TARGETS[@]##*/}") -readonly KUBE_STATIC_BINARIES=( - apiextensions-apiserver - kube-aggregator - kube-apiserver - kube-controller-manager - kube-scheduler - kube-proxy - kube-log-runner - kubeadm - kubectl - kubectl-convert - kubemark - mounter -) +readonly KUBE_STATIC_BINARIES=() # Fully-qualified package names that we want to instrument for coverage information. readonly KUBE_COVERAGE_INSTRUMENTED_PACKAGES=( @@ -508,7 +498,7 @@ kube::golang::set_platform_envs() { # if CC is defined for platform then always enable it ccenv=$(echo "$platform" | awk -F/ '{print "KUBE_" toupper($1) "_" toupper($2) "_CC"}') - if [ -n "${!ccenv-}" ]; then + if [ -n "${!ccenv-}" ]; then export CGO_ENABLED=1 export CC="${!ccenv}" fi @@ -519,27 +509,6 @@ kube::golang::set_platform_envs() { # env-var GO_VERSION is the desired go version to use, downloading it if needed (defaults to content of .go-version) # env-var FORCE_HOST_GO set to a non-empty value uses the go version in the $PATH and skips ensuring $GO_VERSION is used kube::golang::internal::verify_go_version() { - # default GO_VERSION to content of .go-version - GO_VERSION="${GO_VERSION:-"$(cat "${KUBE_ROOT}/.go-version")"}" - if [ "${GOTOOLCHAIN:-auto}" != 'auto' ]; then - # no-op, just respect GOTOOLCHAIN - : - elif [ -n "${FORCE_HOST_GO:-}" ]; then - # ensure existing host version is used, like before GOTOOLCHAIN existed - export GOTOOLCHAIN='local' - else - # otherwise, we want to ensure the go version matches GO_VERSION - GOTOOLCHAIN="go${GO_VERSION}" - export GOTOOLCHAIN - # if go is either not installed or too old to respect GOTOOLCHAIN then use gimme - if ! (command -v go >/dev/null && [ "$(go version | cut -d' ' -f3)" = "${GOTOOLCHAIN}" ]); then - export GIMME_ENV_PREFIX=${GIMME_ENV_PREFIX:-"${KUBE_OUTPUT}/.gimme/envs"} - export GIMME_VERSION_PREFIX=${GIMME_VERSION_PREFIX:-"${KUBE_OUTPUT}/.gimme/versions"} - # eval because the output of this is shell to set PATH etc. - eval "$("${KUBE_ROOT}/third_party/gimme/gimme" "${GO_VERSION}")" - fi - fi - if [[ -z "$(command -v go)" ]]; then kube::log::usage_from_stdin </dev/null 2>&1; then - go -C "${KUBE_ROOT}/hack/tools" install ./ncpu || echo "Will not automatically set GOMAXPROCS" + # shellcheck disable=SC2164 + pushd "${KUBE_ROOT}/hack/tools" >/dev/null + go install -mod=readonly ./ncpu || echo "Will not automatically set GOMAXPROCS" + # shellcheck disable=SC2164 + popd >/dev/null fi if command -v ncpu >/dev/null 2>&1; then GOMAXPROCS=$(ncpu) diff --git a/hack/lib/version.sh b/hack/lib/version.sh index ffd8bc3789a09..ae3853df3841a 100644 --- a/hack/lib/version.sh +++ b/hack/lib/version.sh @@ -37,6 +37,8 @@ kube::version::get_version_vars() { return fi + KUBE_GIT_VERSION=$(sed -rn 's/.*io.openshift.build.versions="kubernetes=(1.[0-9]+.[0-9]+)"/v\1/p' openshift-hack/images/hyperkube/Dockerfile.rhel) + # If the kubernetes source was exported through git archive, then # we likely don't have a git tree, but these magic values may be filled in. 
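For illustration, the sed expression above can be mirrored in Go; the label value below is hypothetical, and the dots are escaped here where the sed pattern leaves them unescaped:

    // Mirrors the sed expression in hack/lib/version.sh.
    re := regexp.MustCompile(`io\.openshift\.build\.versions="kubernetes=(1\.[0-9]+\.[0-9]+)"`)
    line := `io.openshift.build.versions="kubernetes=1.23.0"` // hypothetical label
    if m := re.FindStringSubmatch(line); m != nil {
        fmt.Println("v" + m[1]) // prints: v1.23.0
    }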
# shellcheck disable=SC2016,SC2050 diff --git a/hack/make-rules/test.sh b/hack/make-rules/test.sh index afddf8df9825d..d2326809a342f 100755 --- a/hack/make-rules/test.sh +++ b/hack/make-rules/test.sh @@ -52,7 +52,8 @@ kube::test::find_go_packages() { -e '^k8s.io/kubernetes/test/e2e$' \ -e '^k8s.io/kubernetes/test/e2e_node(/.*)?$' \ -e '^k8s.io/kubernetes/test/e2e_kubeadm(/.*)?$' \ - -e '^k8s.io/.*/test/integration(/.*)?$' + -e '^k8s.io/.*/test/integration(/.*)?$' \ + -e '^k8s.io/kubernetes/openshift-hack/e2e(/.*)?$' ) } diff --git a/hack/make-rules/update.sh b/hack/make-rules/update.sh index 69684b5d7852e..aa7cf66378373 100755 --- a/hack/make-rules/update.sh +++ b/hack/make-rules/update.sh @@ -36,6 +36,8 @@ if ! ${ALL} ; then fi BASH_TARGETS=( + update-kubensenter + update-test-annotations update-codegen update-generated-api-compatibility-data update-generated-docs diff --git a/hack/make-rules/verify.sh b/hack/make-rules/verify.sh index c53cbf230e871..dd0cfbd946c7e 100755 --- a/hack/make-rules/verify.sh +++ b/hack/make-rules/verify.sh @@ -44,6 +44,23 @@ EXCLUDED_PATTERNS=( "verify-openapi-docs-urls.sh" # Spams docs URLs, don't run in CI. ) +# Excluded checks for openshift/kubernetes fork that are always skipped. +EXCLUDED_PATTERNS+=( + "verify-boilerplate.sh" # Carries do not require boilerplate + "verify-no-vendor-cycles.sh" # Incompatible with the way many carries are specified + "verify-publishing-bot.sh" # Verifies the upstream rules, which are not maintained in o/k +) + +# Skipped checks for openshift/kubernetes fork that need to be fixed. +EXCLUDED_PATTERNS+=( + "verify-openapi-spec.sh" # TODO(soltysh) Fails in CI during trap phase + "verify-golangci-lint.sh" # TODO(soltysh) Fails to build required tooling + "verify-shellcheck.sh" # TODO(soltysh) Requires either docker or local shellcheck + "verify-spelling.sh" # TODO(soltysh) Need to ensure installation of misspell command + "verify-mocks.sh" # TODO(soltysh) I don't expect us to need mock re-generation + "verify-e2e-suites.sh" # TODO(atiratree) needs to be patched for openshift-hack dir and --list-tests option is disabled by 'UPSTREAM: : temporarily disable reporting e2e text bugs and enforce 2nd labeling to make tests work' +) + +# Exclude typecheck in certain cases, if they're running in a separate job.
if [[ ${EXCLUDE_TYPECHECK:-} =~ ^[yY]$ ]]; then EXCLUDED_PATTERNS+=( diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 281a6f2749a98..a89a1b14247f2 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -763,7 +763,7 @@ function codegen::subprojects() { CODEGEN_PKG="${codegen}" \ UPDATE_API_KNOWN_VIOLATIONS="${UPDATE_API_KNOWN_VIOLATIONS}" \ API_KNOWN_VIOLATIONS_DIR="${API_KNOWN_VIOLATIONS_DIR}" \ - ./hack/update-codegen.sh > >(indent) 2> >(indent >&2) + GOFLAGS=-mod=readonly ./hack/update-codegen.sh > >(indent) 2> >(indent >&2) popd >/dev/null done } diff --git a/hack/update-kubensenter.sh b/hack/update-kubensenter.sh new file mode 120000 index 0000000000000..1b263065ff459 --- /dev/null +++ b/hack/update-kubensenter.sh @@ -0,0 +1 @@ +../openshift-hack/update-kubensenter.sh \ No newline at end of file diff --git a/hack/update-openapi-spec.sh b/hack/update-openapi-spec.sh index 55abf904f732e..22b65f7eb943b 100755 --- a/hack/update-openapi-spec.sh +++ b/hack/update-openapi-spec.sh @@ -28,7 +28,7 @@ source "${KUBE_ROOT}/hack/lib/init.sh" kube::util::require-jq kube::golang::setup_env -kube::etcd::install +# kube::etcd::install # We need to call `make` here because that includes all of the compile and link # flags that we use for a production build, which we need for this script. diff --git a/hack/update-test-annotations.sh b/hack/update-test-annotations.sh new file mode 120000 index 0000000000000..ecf920cd8d6b4 --- /dev/null +++ b/hack/update-test-annotations.sh @@ -0,0 +1 @@ +../openshift-hack/update-test-annotations.sh \ No newline at end of file diff --git a/hack/update-vendor.sh b/hack/update-vendor.sh index f260c92a453d6..c1bdf1b44bdae 100755 --- a/hack/update-vendor.sh +++ b/hack/update-vendor.sh @@ -308,7 +308,7 @@ hack/update-vendor-licenses.sh kube::log::status "vendor: creating OWNERS file" >&11 rm -f "vendor/OWNERS" cat <<__EOF__ > "vendor/OWNERS" -# See the OWNERS docs at https://go.k8s.io/owners +# See the OWNERS docs at https://go.k8s.io/owners options: # make root approval non-recursive @@ -326,7 +326,7 @@ for repo in $(kube::util::list_staging_repos); do echo "=== checking k8s.io/${repo}" cd "staging/src/k8s.io/${repo}" loopback_deps=() - kube::util::read-array loopback_deps < <(go list all 2>/dev/null | grep k8s.io/kubernetes/ || true) + kube::util::read-array loopback_deps < <(go list all 2>/dev/null | grep k8s.io/kubernetes/ | grep -v github.com/openshift/apiserver-library-go || true) if (( "${#loopback_deps[@]}" > 0 )); then kube::log::error "${#loopback_deps[@]} disallowed ${repo} -> k8s.io/kubernetes dependencies exist via the following imports: $(go mod why "${loopback_deps[@]}")" >&22 2>&1 exit 1 @@ -336,8 +336,9 @@ done kube::log::status "go.mod: prevent k8s.io/kubernetes --> * --> k8s.io/kubernetes dep" >&11 loopback_deps=() -kube::util::read-array loopback_deps < <(go mod graph | grep ' k8s.io/kubernetes' || true) -if (( "${#loopback_deps[@]}" > 0 )); then +kube::util::read-array loopback_deps < <(go mod graph | grep ' k8s.io/kubernetes' | grep -v github.com/openshift/apiserver-library-go || true) +# Allow apiserver-library-go to depend on k8s.io/kubernetes +if [[ -n ${loopback_deps[*]:+"${loopback_deps[*]}"} && !
"${loopback_deps[*]}" =~ github.com/openshift/apiserver-library-go ]]; then kube::log::error "${#loopback_deps[@]} disallowed transitive k8s.io/kubernetes dependencies exist via the following imports:" >&22 2>&1 kube::log::error "${loopback_deps[@]}" >&22 2>&1 exit 1 diff --git a/hack/verify-external-dependencies-version.sh b/hack/verify-external-dependencies-version.sh index 4734f199e09d3..a395239576fc3 100755 --- a/hack/verify-external-dependencies-version.sh +++ b/hack/verify-external-dependencies-version.sh @@ -31,7 +31,7 @@ export GOBIN="${KUBE_OUTPUT_BIN}" PATH="${GOBIN}:${PATH}" # Install zeitgeist -go install sigs.k8s.io/zeitgeist@v0.5.4 +go install -mod=readonly sigs.k8s.io/zeitgeist@v0.5.4 # Prefer full path for running zeitgeist ZEITGEIST_BIN="$(which zeitgeist)" diff --git a/hack/verify-govulncheck.sh b/hack/verify-govulncheck.sh index 5057f9a314233..120f2f4dcb33a 100755 --- a/hack/verify-govulncheck.sh +++ b/hack/verify-govulncheck.sh @@ -27,7 +27,7 @@ kube::util::ensure_clean_working_dir # This sets up the environment, like GOCACHE, which keeps the worktree cleaner. kube::golang::setup_env -go install golang.org/x/vuln/cmd/govulncheck@v1.1.2 +go install -mod=readonly golang.org/x/vuln/cmd/govulncheck@v1.1.2 # KUBE_VERIFY_GIT_BRANCH is populated in verify CI jobs BRANCH="${KUBE_VERIFY_GIT_BRANCH:-master}" @@ -45,7 +45,7 @@ pushd "${WORKTREE}" >/dev/null govulncheck -scan package ./... > "${KUBE_TEMP}/pr-base.txt" || true popd >/dev/null -echo -e "\n HEAD: $(cat "${KUBE_TEMP}"/head.txt)" -echo -e "\n PR_BASE: $(cat "${KUBE_TEMP}/pr-base.txt")" +echo -e "\n HEAD: $(cat "${KUBE_TEMP}"/head.txt)" +echo -e "\n PR_BASE: $(cat "${KUBE_TEMP}/pr-base.txt")" diff -s -u --ignore-all-space "${KUBE_TEMP}"/pr-base.txt "${KUBE_TEMP}"/head.txt || true diff --git a/hack/verify-kubensenter.sh b/hack/verify-kubensenter.sh new file mode 120000 index 0000000000000..01e1608f153ca --- /dev/null +++ b/hack/verify-kubensenter.sh @@ -0,0 +1 @@ +../openshift-hack/verify-kubensenter.sh \ No newline at end of file diff --git a/hack/verify-openapi-spec.sh b/hack/verify-openapi-spec.sh index a8eaf4ef3f2c6..c88b96b1d15d0 100755 --- a/hack/verify-openapi-spec.sh +++ b/hack/verify-openapi-spec.sh @@ -25,6 +25,7 @@ set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
-source "${KUBE_ROOT}/hack/lib/verify-generated.sh" +kube::golang::setup_env +# kube::etcd::install kube::verify::generated "Generated files need to be updated" "Please run 'hack/update-openapi-spec.sh'" hack/update-openapi-spec.sh "$@" diff --git a/hack/verify-spelling.sh b/hack/verify-spelling.sh index d4d8be5631a47..0dfc8e76415ed 100755 --- a/hack/verify-spelling.sh +++ b/hack/verify-spelling.sh @@ -32,7 +32,9 @@ export GOBIN="${KUBE_OUTPUT_BIN}" PATH="${GOBIN}:${PATH}" # Install tools we need -go -C "${KUBE_ROOT}/hack/tools" install github.com/client9/misspell/cmd/misspell +pushd "${KUBE_ROOT}/hack/tools" >/dev/null + go install -mod=readonly github.com/client9/misspell/cmd/misspell +popd >/dev/null # Spell checking # All the skipping files are defined in hack/.spelling_failures diff --git a/hack/verify-test-annotations.sh b/hack/verify-test-annotations.sh new file mode 120000 index 0000000000000..a9cbed2d3245a --- /dev/null +++ b/hack/verify-test-annotations.sh @@ -0,0 +1 @@ +../openshift-hack/verify-test-annotations.sh \ No newline at end of file diff --git a/hack/verify-vendor.sh b/hack/verify-vendor.sh index c68fbbbc84081..028a551da5b5e 100755 --- a/hack/verify-vendor.sh +++ b/hack/verify-vendor.sh @@ -84,8 +84,12 @@ pushd "${KUBE_ROOT}" > /dev/null 2>&1 ret=1 fi + # Given that we don't intend to publish staging repos from our fork, + # it does not seem necessary to ensure that dependencies will match + # across staging repos when published. + # # Verify we are pinned to matching levels - hack/lint-dependencies.sh >&2 + #hack/lint-dependencies.sh >&2 popd > /dev/null 2>&1 if [[ ${ret} -gt 0 ]]; then diff --git a/openshift-hack/build-go.sh b/openshift-hack/build-go.sh new file mode 100755 index 0000000000000..dfc663d23a593 --- /dev/null +++ b/openshift-hack/build-go.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +STARTTIME=$(date +%s) + +# shellcheck source=openshift-hack/lib/init.sh +source "$(dirname "${BASH_SOURCE[0]}")/lib/init.sh" + +pushd "${OS_ROOT}" > /dev/null || exit 1 + make all WHAT='cmd/kube-apiserver cmd/kube-controller-manager cmd/kube-scheduler cmd/kubelet' +popd > /dev/null || exit 1 + +os::build::version::git_vars + +if [[ "${OS_GIT_TREE_STATE:-dirty}" == "clean" ]]; then + # only when we are building from a clean state can we claim to + # have created a valid set of binaries that can resemble a release + mkdir -p "${OS_OUTPUT_RELEASEPATH}" + echo "${OS_GIT_COMMIT}" > "${OS_OUTPUT_RELEASEPATH}/.commit" +fi + +ret=$?; ENDTIME=$(date +%s); echo "$0 took $((ENDTIME - STARTTIME)) seconds"; exit "$ret" diff --git a/openshift-hack/build-rpms.sh b/openshift-hack/build-rpms.sh new file mode 100755 index 0000000000000..7fec9962e634f --- /dev/null +++ b/openshift-hack/build-rpms.sh @@ -0,0 +1,131 @@ +#!/usr/bin/env bash + +# This script generates RPMs into _output/releases. All build +# dependencies are required on the host. The build will be performed +# by the upstream makefile called from the spec file. +# shellcheck source=openshift-hack/lib/init.sh +source "$(dirname "${BASH_SOURCE[0]}")/lib/init.sh" + +# Only build linux by default. Clearing this value will build all platforms +OS_ONLY_BUILD_PLATFORMS="${OS_ONLY_BUILD_PLATFORMS:-linux/amd64}" + +function cleanup() { + return_code=$? 
+ os::util::describe_return_code "${return_code}" + exit "${return_code}" +} +trap "cleanup" EXIT + +# check whether we are in a clean output state +dirty="$( if [[ -d "${OS_OUTPUT}" ]]; then echo '1'; fi )" + +os::util::ensure::system_binary_exists rpmbuild +os::util::ensure::system_binary_exists createrepo + +if [[ -n "${OS_BUILD_SRPM-}" ]]; then + srpm="a" +else + srpm="b" +fi + +os::build::rpm::get_nvra_vars + +OS_RPM_SPECFILE="$( find "${OS_ROOT}" -name '*.spec' )" +OS_RPM_SPECQUERY="$( rpmspec -q --qf '%{name}\n' "${OS_RPM_SPECFILE}" )" +OS_RPM_NAME="$( head -1 <<< "${OS_RPM_SPECQUERY}" )" + +os::log::info "Building release RPMs for ${OS_RPM_SPECFILE} ..." + +rpm_tmp_dir="${BASETMPDIR}/rpm" + +# RPM requires the spec file be owned by the invoking user +chown "$(id -u):$(id -g)" "${OS_RPM_SPECFILE}" || true + +if [[ -n "${dirty}" && "${OS_GIT_TREE_STATE}" == "dirty" ]]; then + os::log::warning "Repository is not clean, performing fast build and reusing _output" + + # build and output from source to destination + rm -rf "${rpm_tmp_dir}" + mkdir -p "${rpm_tmp_dir}" + ln -fns "${OS_ROOT}" "${rpm_tmp_dir}/SOURCES" + ln -fns "${OS_ROOT}" "${rpm_tmp_dir}/BUILD" + rpmbuild -bb "${OS_RPM_SPECFILE}" \ + --define "_sourcedir ${rpm_tmp_dir}/SOURCES" \ + --define "_builddir ${rpm_tmp_dir}/BUILD" \ + --define "skip_prep 1" \ + --define "skip_dist ${SKIP_DIST:-1}" \ + --define "version ${OS_RPM_VERSION}" \ + --define "release ${OS_RPM_RELEASE}" \ + --define "commit ${OS_GIT_COMMIT}" \ + --define "os_git_vars ${OS_RPM_GIT_VARS}" \ + --define "_topdir ${rpm_tmp_dir}" + + mkdir -p "${OS_OUTPUT_RPMPATH}" + mv -f "${rpm_tmp_dir}"/RPMS/*/*.rpm "${OS_OUTPUT_RPMPATH}" + +else + rm -rf "${rpm_tmp_dir}/SOURCES" + mkdir -p "${rpm_tmp_dir}/SOURCES" + tar czf "${rpm_tmp_dir}/SOURCES/${OS_RPM_NAME}-${OS_RPM_VERSION}.tar.gz" \ + --owner=0 --group=0 \ + --exclude=_output --exclude=.git \ + --transform "s|^|${OS_RPM_NAME}-${OS_RPM_VERSION}/|rSH" \ + . + + rpmbuild -b${srpm} "${OS_RPM_SPECFILE}" \ + --define "skip_dist ${SKIP_DIST:-1}" \ + --define "version ${OS_RPM_VERSION}" \ + --define "release ${OS_RPM_RELEASE}" \ + --define "commit ${OS_GIT_COMMIT}" \ + --define "os_git_vars ${OS_RPM_GIT_VARS}" \ + --define "_topdir ${rpm_tmp_dir}" + + output_directory="$( find "${rpm_tmp_dir}" -type d -path "*/BUILD/${OS_RPM_NAME}-${OS_RPM_VERSION}/_output/local" )" + if [[ -z "${output_directory}" ]]; then + os::log::fatal 'No _output artifact directory found in rpmbuild artifacts!' + fi + + # migrate the rpm artifacts to the output directory, must be clean or move will fail + make clean + mkdir -p "${OS_OUTPUT}" + + # mv exits prematurely with status 1 in the following scenario: running as root, + # attempting to move a [directory tree containing a] symlink to a destination on + # an NFS volume exported with root_squash set. This can occur when running this + # script on a Vagrant box. The error shown is "mv: failed to preserve ownership + # for $FILE: Operation not permitted". As a workaround, if + # ${output_directory} and ${OS_OUTPUT} are on different devices, use cp and + # rm instead. 
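The device-number comparison described above (and implemented with `stat -c %d` just below) can be expressed in Go for illustration; sameDevice is a hypothetical helper, and syscall.Stat_t.Dev is Linux-specific:

    // sameDevice reports whether two paths live on the same filesystem,
    // mirroring the `stat -c %d` comparison in the script.
    func sameDevice(a, b string) (bool, error) {
        var sa, sb syscall.Stat_t
        if err := syscall.Stat(a, &sa); err != nil {
            return false, err
        }
        if err := syscall.Stat(b, &sb); err != nil {
            return false, err
        }
        return sa.Dev == sb.Dev, nil
    }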
+ if [[ $(stat -c %d "${output_directory}") == $(stat -c %d "${OS_OUTPUT}") ]]; then + mv "${output_directory}"/* "${OS_OUTPUT}" + else + cp -R "${output_directory}"/* "${OS_OUTPUT}" + rm -rf "${output_directory:?}"/* + fi + + mkdir -p "${OS_OUTPUT_RPMPATH}" + if [[ -n "${OS_BUILD_SRPM-}" ]]; then + mv -f "${rpm_tmp_dir}"/SRPMS/*src.rpm "${OS_OUTPUT_RPMPATH}" + fi + mv -f "${rpm_tmp_dir}"/RPMS/*/*.rpm "${OS_OUTPUT_RPMPATH}" +fi + +mkdir -p "${OS_OUTPUT_RELEASEPATH}" +echo "${OS_GIT_COMMIT}" > "${OS_OUTPUT_RELEASEPATH}/.commit" + +repo_path="$( os::util::absolute_path "${OS_OUTPUT_RPMPATH}" )" +createrepo "${repo_path}" + +echo "[${OS_RPM_NAME}-local-release] +baseurl = file://${repo_path} +gpgcheck = 0 +name = Release from Local Source for ${OS_RPM_NAME} +enabled = 1 +" > "${repo_path}/local-release.repo" + +# DEPRECATED: preserve until jobs migrate to using local-release.repo +cp "${repo_path}/local-release.repo" "${repo_path}/origin-local-release.repo" + +os::log::info "Repository file for \`yum\` or \`dnf\` placed at ${repo_path}/local-release.repo +Install it with: +$ mv '${repo_path}/local-release.repo' '/etc/yum.repos.d/'" diff --git a/openshift-hack/cmd/go-imports-diff/main.go b/openshift-hack/cmd/go-imports-diff/main.go new file mode 100644 index 0000000000000..6d7ec96f55cff --- /dev/null +++ b/openshift-hack/cmd/go-imports-diff/main.go @@ -0,0 +1,74 @@ +package main + +import ( + "flag" + "fmt" + "go/parser" + "go/token" + "os" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" +) + +const testPackagePrefix = "k8s.io/kubernetes/test/e2e" + +func main() { + // Parse flags + excludeList := flag.String("exclude", "", "Comma-separated list of imports to be ignored") + flag.Parse() + + // Parse positional arguments + args := flag.Args() + if len(args) != 2 { + fmt.Fprintf(os.Stderr, "Usage: %s [flags] <base-file> <compare-file>\n", os.Args[0]) + flag.PrintDefaults() + os.Exit(2) + } + baseFile := args[0] + compareFile := args[1] + + // Parse the base file + baseNode, err := parser.ParseFile(token.NewFileSet(), baseFile, nil, parser.AllErrors) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to parse go file %s: %v\n", baseFile, err) + os.Exit(1) + } + + // Collect the test/e2e imports of the base file into a set + baseImports := sets.New[string]() + for _, imp := range baseNode.Imports { + v := strings.Trim(imp.Path.Value, `"`) + if !strings.Contains(v, testPackagePrefix) { + continue + } + baseImports.Insert(v) + } + + // Parse the file that is compared with the base one + compareNode, err := parser.ParseFile(token.NewFileSet(), compareFile, nil, parser.AllErrors) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to parse go file %s: %v\n", compareFile, err) + os.Exit(1) + } + + // Collect the test/e2e imports of the compare file into a set + compareImports := sets.New[string]() + for _, imp := range compareNode.Imports { + v := strings.Trim(imp.Path.Value, `"`) + if !strings.Contains(v, testPackagePrefix) { + continue + } + compareImports.Insert(v) + } + + // Compare imports of both files + exclude := strings.Split(*excludeList, ",") + diff := baseImports.Difference(compareImports).Delete(exclude...).UnsortedList() + if len(diff) > 0 { + sort.Strings(diff) + fmt.Fprintf(os.Stderr, "Imports from %q not in %q:\n\n%s\n", baseFile, compareFile, strings.Join(diff, "\n")) + os.Exit(1) + } +} diff --git a/openshift-hack/cmd/k8s-tests-ext/k8s-tests.go b/openshift-hack/cmd/k8s-tests-ext/k8s-tests.go new file mode 100644 index 0000000000000..03b626a827a6e --- /dev/null +++ b/openshift-hack/cmd/k8s-tests-ext/k8s-tests.go @@ -0,0 +1,112
@@ +package main + +import ( + "flag" + "k8s.io/kubernetes/test/e2e/framework" + "os" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/openshift-eng/openshift-tests-extension/pkg/cmd" + e "github.com/openshift-eng/openshift-tests-extension/pkg/extension" + "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests" + g "github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo" + v "github.com/openshift-eng/openshift-tests-extension/pkg/version" + + "k8s.io/client-go/pkg/version" + utilflag "k8s.io/component-base/cli/flag" + "k8s.io/component-base/logs" + "k8s.io/kubernetes/openshift-hack/e2e/annotate/generated" + + // initialize framework extensions + _ "k8s.io/kubernetes/test/e2e/framework/debug/init" + _ "k8s.io/kubernetes/test/e2e/framework/metrics/init" +) + +func main() { + logs.InitLogs() + defer logs.FlushLogs() + pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) + + // These flags are used to pull in the default values to test context - required + // so tests run correctly, even if the underlying flags aren't used. + framework.RegisterCommonFlags(flag.CommandLine) + framework.RegisterClusterFlags(flag.CommandLine) + + // Get version info from kube + kubeVersion := version.Get() + v.GitTreeState = kubeVersion.GitTreeState + v.BuildDate = kubeVersion.BuildDate + v.CommitFromGit = kubeVersion.GitCommit + + // Create our registry of openshift-tests extensions + extensionRegistry := e.NewRegistry() + kubeTestsExtension := e.NewExtension("openshift", "payload", "hyperkube") + extensionRegistry.Register(kubeTestsExtension) + + // Carve up the kube tests into our openshift suites... + kubeTestsExtension.AddSuite(e.Suite{ + Name: "kubernetes/conformance/parallel", + Parents: []string{ + "openshift/conformance/parallel", + "openshift/conformance/parallel/minimal", + }, + Qualifiers: []string{`!labels.exists(l, l == "Serial") && labels.exists(l, l == "Conformance")`}, + }) + + kubeTestsExtension.AddSuite(e.Suite{ + Name: "kubernetes/conformance/serial", + Parents: []string{ + "openshift/conformance/serial", + "openshift/conformance/serial/minimal", + }, + Qualifiers: []string{`labels.exists(l, l == "Serial") && labels.exists(l, l == "Conformance")`}, + }) + + //FIXME(stbenjam): what other suites does k8s-test contribute to? + + // Build our specs from ginkgo + specs, err := g.BuildExtensionTestSpecsFromOpenShiftGinkgoSuite() + if err != nil { + panic(err) + } + + // Initialization for kube ginkgo test framework needs to run before all tests execute + specs.AddBeforeAll(func() { + if err := initializeTestFramework(os.Getenv("TEST_PROVIDER")); err != nil { + panic(err) + } + }) + + // Annotations get appended to test names; these are additions to upstream + // tests for controlling skips, suite membership, etc. + // + // TODO: + // - Remove this annotation code, and migrate to Labels/Tags and + // the environmental skip code from the enhancement once it's implemented.
+ // - Make sure to account for test renames that occur because of removal of these + // annotations + specs.Walk(func(spec *extensiontests.ExtensionTestSpec) { + if annotations, ok := generated.Annotations[spec.Name]; ok { + spec.Name += annotations + } + }) + + kubeTestsExtension.AddSpecs(specs) + + // Cobra stuff + root := &cobra.Command{ + Long: "Kubernetes tests extension for OpenShift", + } + + root.AddCommand( + cmd.DefaultExtensionCommands(extensionRegistry)..., + ) + + if err := func() error { + return root.Execute() + }(); err != nil { + os.Exit(1) + } +} diff --git a/openshift-hack/cmd/k8s-tests-ext/provider.go b/openshift-hack/cmd/k8s-tests-ext/provider.go new file mode 100644 index 0000000000000..cdc948a45c652 --- /dev/null +++ b/openshift-hack/cmd/k8s-tests-ext/provider.go @@ -0,0 +1,147 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + kclientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubernetes/openshift-hack/e2e" + conformancetestdata "k8s.io/kubernetes/test/conformance/testdata" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/testfiles" + "k8s.io/kubernetes/test/e2e/storage/external" + e2etestingmanifests "k8s.io/kubernetes/test/e2e/testing-manifests" + testfixtures "k8s.io/kubernetes/test/fixtures" + + // this appears to inexplicably auto-register global flags. + _ "k8s.io/kubernetes/test/e2e/storage/drivers" + + // these are loading important global flags that we need to get and set + _ "k8s.io/kubernetes/test/e2e" + _ "k8s.io/kubernetes/test/e2e/lifecycle" +) + +// copied directly from github.com/openshift/origin/cmd/openshift-tests/provider.go +// and github.com/openshift/origin/test/extended/util/test.go +func initializeTestFramework(provider string) error { + providerInfo := &ClusterConfiguration{} + if err := json.Unmarshal([]byte(provider), &providerInfo); err != nil { + return fmt.Errorf("provider must be a JSON object with the 'type' key at a minimum: %v", err) + } + if len(providerInfo.ProviderName) == 0 { + return fmt.Errorf("provider must be a JSON object with the 'type' key") + } + config := &ClusterConfiguration{} + if err := json.Unmarshal([]byte(provider), config); err != nil { + return fmt.Errorf("provider must decode into the ClusterConfig object: %v", err) + } + + // update testContext with loaded config + testContext := &framework.TestContext + testContext.Provider = config.ProviderName + testContext.CloudConfig = framework.CloudConfig{ + ProjectID: config.ProjectID, + Region: config.Region, + Zone: config.Zone, + Zones: config.Zones, + NumNodes: config.NumNodes, + MultiMaster: config.MultiMaster, + MultiZone: config.MultiZone, + ConfigFile: config.ConfigFile, + } + testContext.AllowedNotReadyNodes = -1 + testContext.MinStartupPods = -1 + testContext.MaxNodesToGather = 0 + testContext.KubeConfig = os.Getenv("KUBECONFIG") + + // allow the CSI tests to access test data, but only briefly + // TODO: ideally CSI would not use any of these test methods + // var err error + // exutil.WithCleanup(func() { err = initCSITests(dryRun) }) + // TODO: for now I'm only initializing CSI directly, but we probably need that + // WithCleanup here as well + if err := initCSITests(); err != nil { + return err + } + + if ad := os.Getenv("ARTIFACT_DIR"); len(strings.TrimSpace(ad)) == 0 { + os.Setenv("ARTIFACT_DIR", filepath.Join(os.TempDir(), "artifacts")) + } 
+ + testContext.DeleteNamespace = os.Getenv("DELETE_NAMESPACE") != "false" + testContext.VerifyServiceAccount = true + testfiles.AddFileSource(e2etestingmanifests.GetE2ETestingManifestsFS()) + testfiles.AddFileSource(testfixtures.GetTestFixturesFS()) + testfiles.AddFileSource(conformancetestdata.GetConformanceTestdataFS()) + testContext.KubectlPath = "kubectl" + // context.KubeConfig = KubeConfigPath() + testContext.KubeConfig = os.Getenv("KUBECONFIG") + + // "debian" is used when not set. At least GlusterFS tests need "custom". + // (There is no option for "rhel" or "centos".) + testContext.NodeOSDistro = "custom" + testContext.MasterOSDistro = "custom" + + // load and set the host variable for kubectl + clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: testContext.KubeConfig}, &clientcmd.ConfigOverrides{}) + cfg, err := clientConfig.ClientConfig() + if err != nil { + return err + } + testContext.Host = cfg.Host + + // Ensure that Kube tests run privileged (like they do upstream) + testContext.CreateTestingNS = func(ctx context.Context, baseName string, c kclientset.Interface, labels map[string]string) (*corev1.Namespace, error) { + return e2e.CreateTestingNS(ctx, baseName, c, labels, true) + } + + gomega.RegisterFailHandler(ginkgo.Fail) + + framework.AfterReadingAllFlags(testContext) + testContext.DumpLogsOnFailure = true + + // these constants are taken from kube e2e and used by tests + testContext.IPFamily = "ipv4" + if config.HasIPv6 && !config.HasIPv4 { + testContext.IPFamily = "ipv6" + } + + testContext.ReportDir = os.Getenv("TEST_JUNIT_DIR") + + return nil +} + +const ( + manifestEnvVar = "TEST_CSI_DRIVER_FILES" +) + +// copied directly from github.com/openshift/origin/cmd/openshift-tests/csi.go +// Initialize openshift/csi suite, i.e. define CSI tests from TEST_CSI_DRIVER_FILES. +func initCSITests() error { + manifestList := os.Getenv(manifestEnvVar) + if manifestList != "" { + manifests := strings.Split(manifestList, ",") + for _, manifest := range manifests { + if err := external.AddDriverDefinition(manifest); err != nil { + return fmt.Errorf("failed to load manifest from %q: %s", manifest, err) + } + // Register the base dir of the manifest file as a file source. + // With this we can reference the CSI driver's storageClass + // in the manifest file (FromFile field). + testfiles.AddFileSource(testfiles.RootFileSource{ + Root: filepath.Dir(manifest), + }) + } + } + + return nil +} diff --git a/openshift-hack/cmd/k8s-tests-ext/types.go b/openshift-hack/cmd/k8s-tests-ext/types.go new file mode 100644 index 0000000000000..b43652499537d --- /dev/null +++ b/openshift-hack/cmd/k8s-tests-ext/types.go @@ -0,0 +1,47 @@ +package main + +// copied directly from github.com/openshift/origin/test/extended/util/cluster/cluster.go +type ClusterConfiguration struct { + ProviderName string `json:"type"` + + // These fields (and the "type" tag for ProviderName) chosen to match + // upstream's e2e.CloudConfig. + ProjectID string + Region string + Zone string + NumNodes int + MultiMaster bool + MultiZone bool + Zones []string + ConfigFile string + + // Disconnected is set for test jobs without external internet connectivity + Disconnected bool + + // SingleReplicaTopology is set for disabling disruptive tests or tests + // that require high availability + SingleReplicaTopology bool + + // NetworkPlugin is the "official" plugin name + NetworkPlugin string + // NetworkPluginMode is an optional sub-identifier for the NetworkPlugin. 
+ // (Currently it is only used for OpenShiftSDN.) + NetworkPluginMode string `json:",omitempty"` + + // HasIPv4 and HasIPv6 determine whether IPv4-specific, IPv6-specific, + // and dual-stack-specific tests are run + HasIPv4 bool + HasIPv6 bool + + // HasSCTP determines whether SCTP connectivity tests can be run in the cluster + HasSCTP bool + + // IsProxied determines whether we are accessing the cluster through an HTTP proxy + IsProxied bool + + // IsIBMROKS determines whether the cluster is Managed IBM Cloud (ROKS) + IsIBMROKS bool + + // IsNoOptionalCapabilities indicates the cluster has no optional capabilities enabled + HasNoOptionalCapabilities bool +} diff --git a/openshift-hack/cmd/k8s-tests/k8s-tests.go b/openshift-hack/cmd/k8s-tests/k8s-tests.go new file mode 100644 index 0000000000000..fedd8b16f0141 --- /dev/null +++ b/openshift-hack/cmd/k8s-tests/k8s-tests.go @@ -0,0 +1,98 @@ +package main + +import ( + "encoding/json" + "flag" + "fmt" + "math/rand" + "os" + "sort" + "time" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + utilflag "k8s.io/component-base/cli/flag" + "k8s.io/component-base/logs" + "k8s.io/kubernetes/test/e2e/framework" + + // initialize framework extensions + _ "k8s.io/kubernetes/test/e2e/framework/debug/init" + _ "k8s.io/kubernetes/test/e2e/framework/metrics/init" +) + +func main() { + logs.InitLogs() + defer logs.FlushLogs() + + rand.Seed(time.Now().UTC().UnixNano()) + + pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) + + root := &cobra.Command{ + Long: "OpenShift Tests compatible wrapper", + } + + root.AddCommand( + newRunTestCommand(), + newListTestsCommand(), + ) + + f := flag.CommandLine.Lookup("v") + root.PersistentFlags().AddGoFlag(f) + pflag.CommandLine = pflag.NewFlagSet("empty", pflag.ExitOnError) + flag.CommandLine = flag.NewFlagSet("empty", flag.ExitOnError) + framework.RegisterCommonFlags(flag.CommandLine) + framework.RegisterClusterFlags(flag.CommandLine) + + if err := func() error { + return root.Execute() + }(); err != nil { + if ex, ok := err.(ExitError); ok { + fmt.Fprintf(os.Stderr, "Ginkgo exit error %d: %v\n", ex.Code, err) + os.Exit(ex.Code) + } + fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } +} + +func newRunTestCommand() *cobra.Command { + testOpt := NewTestOptions(os.Stdout, os.Stderr) + + cmd := &cobra.Command{ + Use: "run-test NAME", + Short: "Run a single test by name", + Long: "Execute a single test.", + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + if err := initializeTestFramework(os.Getenv("TEST_PROVIDER")); err != nil { + return err + } + + return testOpt.Run(args) + }, + } + return cmd +} + +func newListTestsCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "List available tests", + Long: "List the available tests in this binary.", + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + tests := testsForSuite() + sort.Slice(tests, func(i, j int) bool { return tests[i].Name < tests[j].Name }) + data, err := json.Marshal(tests) + if err != nil { + return err + } + fmt.Fprintf(os.Stdout, "%s\n", data) + return nil + }, + } + + return cmd +} diff --git a/openshift-hack/cmd/k8s-tests/provider.go b/openshift-hack/cmd/k8s-tests/provider.go new file mode 100644 index 0000000000000..cdc948a45c652 --- /dev/null +++ b/openshift-hack/cmd/k8s-tests/provider.go @@ -0,0 +1,147 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/onsi/ginkgo/v2" + 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + kclientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubernetes/openshift-hack/e2e" + conformancetestdata "k8s.io/kubernetes/test/conformance/testdata" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/testfiles" + "k8s.io/kubernetes/test/e2e/storage/external" + e2etestingmanifests "k8s.io/kubernetes/test/e2e/testing-manifests" + testfixtures "k8s.io/kubernetes/test/fixtures" + + // this appears to inexplicably auto-register global flags. + _ "k8s.io/kubernetes/test/e2e/storage/drivers" + + // these are loading important global flags that we need to get and set + _ "k8s.io/kubernetes/test/e2e" + _ "k8s.io/kubernetes/test/e2e/lifecycle" +) + +// copied directly from github.com/openshift/origin/cmd/openshift-tests/provider.go +// and github.com/openshift/origin/test/extended/util/test.go +func initializeTestFramework(provider string) error { + providerInfo := &ClusterConfiguration{} + if err := json.Unmarshal([]byte(provider), &providerInfo); err != nil { + return fmt.Errorf("provider must be a JSON object with the 'type' key at a minimum: %v", err) + } + if len(providerInfo.ProviderName) == 0 { + return fmt.Errorf("provider must be a JSON object with the 'type' key") + } + config := &ClusterConfiguration{} + if err := json.Unmarshal([]byte(provider), config); err != nil { + return fmt.Errorf("provider must decode into the ClusterConfig object: %v", err) + } + + // update testContext with loaded config + testContext := &framework.TestContext + testContext.Provider = config.ProviderName + testContext.CloudConfig = framework.CloudConfig{ + ProjectID: config.ProjectID, + Region: config.Region, + Zone: config.Zone, + Zones: config.Zones, + NumNodes: config.NumNodes, + MultiMaster: config.MultiMaster, + MultiZone: config.MultiZone, + ConfigFile: config.ConfigFile, + } + testContext.AllowedNotReadyNodes = -1 + testContext.MinStartupPods = -1 + testContext.MaxNodesToGather = 0 + testContext.KubeConfig = os.Getenv("KUBECONFIG") + + // allow the CSI tests to access test data, but only briefly + // TODO: ideally CSI would not use any of these test methods + // var err error + // exutil.WithCleanup(func() { err = initCSITests(dryRun) }) + // TODO: for now I'm only initializing CSI directly, but we probably need that + // WithCleanup here as well + if err := initCSITests(); err != nil { + return err + } + + if ad := os.Getenv("ARTIFACT_DIR"); len(strings.TrimSpace(ad)) == 0 { + os.Setenv("ARTIFACT_DIR", filepath.Join(os.TempDir(), "artifacts")) + } + + testContext.DeleteNamespace = os.Getenv("DELETE_NAMESPACE") != "false" + testContext.VerifyServiceAccount = true + testfiles.AddFileSource(e2etestingmanifests.GetE2ETestingManifestsFS()) + testfiles.AddFileSource(testfixtures.GetTestFixturesFS()) + testfiles.AddFileSource(conformancetestdata.GetConformanceTestdataFS()) + testContext.KubectlPath = "kubectl" + // context.KubeConfig = KubeConfigPath() + testContext.KubeConfig = os.Getenv("KUBECONFIG") + + // "debian" is used when not set. At least GlusterFS tests need "custom". + // (There is no option for "rhel" or "centos".) 
+ testContext.NodeOSDistro = "custom" + testContext.MasterOSDistro = "custom" + + // load and set the host variable for kubectl + clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: testContext.KubeConfig}, &clientcmd.ConfigOverrides{}) + cfg, err := clientConfig.ClientConfig() + if err != nil { + return err + } + testContext.Host = cfg.Host + + // Ensure that Kube tests run privileged (like they do upstream) + testContext.CreateTestingNS = func(ctx context.Context, baseName string, c kclientset.Interface, labels map[string]string) (*corev1.Namespace, error) { + return e2e.CreateTestingNS(ctx, baseName, c, labels, true) + } + + gomega.RegisterFailHandler(ginkgo.Fail) + + framework.AfterReadingAllFlags(testContext) + testContext.DumpLogsOnFailure = true + + // these constants are taken from kube e2e and used by tests + testContext.IPFamily = "ipv4" + if config.HasIPv6 && !config.HasIPv4 { + testContext.IPFamily = "ipv6" + } + + testContext.ReportDir = os.Getenv("TEST_JUNIT_DIR") + + return nil +} + +const ( + manifestEnvVar = "TEST_CSI_DRIVER_FILES" +) + +// copied directly from github.com/openshift/origin/cmd/openshift-tests/csi.go +// Initialize openshift/csi suite, i.e. define CSI tests from TEST_CSI_DRIVER_FILES. +func initCSITests() error { + manifestList := os.Getenv(manifestEnvVar) + if manifestList != "" { + manifests := strings.Split(manifestList, ",") + for _, manifest := range manifests { + if err := external.AddDriverDefinition(manifest); err != nil { + return fmt.Errorf("failed to load manifest from %q: %s", manifest, err) + } + // Register the base dir of the manifest file as a file source. + // With this we can reference the CSI driver's storageClass + // in the manifest file (FromFile field). + testfiles.AddFileSource(testfiles.RootFileSource{ + Root: filepath.Dir(manifest), + }) + } + } + + return nil +} diff --git a/openshift-hack/cmd/k8s-tests/runtest.go b/openshift-hack/cmd/k8s-tests/runtest.go new file mode 100644 index 0000000000000..0abff33438fc3 --- /dev/null +++ b/openshift-hack/cmd/k8s-tests/runtest.go @@ -0,0 +1,143 @@ +package main + +import ( + "fmt" + "io" + "os" + "regexp" + "strings" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" + + "k8s.io/kubernetes/openshift-hack/e2e/annotate/generated" + + // ensure all the ginkgo tests are loaded + _ "k8s.io/kubernetes/openshift-hack/e2e" +) + +// TestOptions handles running a single test. +type TestOptions struct { + Out io.Writer + ErrOut io.Writer +} + +var _ ginkgo.GinkgoTestingT = &TestOptions{} + +func NewTestOptions(out io.Writer, errOut io.Writer) *TestOptions { + return &TestOptions{ + Out: out, + ErrOut: errOut, + } +} + +func (opt *TestOptions) Run(args []string) error { + if len(args) != 1 { + return fmt.Errorf("only a single test name may be passed") + } + + // Ignore the upstream suite behavior within test execution + ginkgo.GetSuite().ClearBeforeAndAfterSuiteNodes() + tests := testsForSuite() + var test *TestCase + for _, t := range tests { + if t.Name == args[0] { + test = t + break + } + } + if test == nil { + return fmt.Errorf("no test exists with that name: %s", args[0]) + } + + suiteConfig, reporterConfig := ginkgo.GinkgoConfiguration() + suiteConfig.FocusStrings = []string{fmt.Sprintf("^ %s$", regexp.QuoteMeta(test.Name))} + + // These settings are matched to upstream's ginkgo configuration. 
See: + // https://github.com/kubernetes/kubernetes/blob/v1.25.0/test/e2e/framework/test_context.go#L354-L355 + // Randomize specs as well as suites + suiteConfig.RandomizeAllSpecs = true + // https://github.com/kubernetes/kubernetes/blob/v1.25.0/hack/ginkgo-e2e.sh#L172-L173 + suiteConfig.Timeout = 24 * time.Hour + reporterConfig.NoColor = true + reporterConfig.Verbose = true + + ginkgo.SetReporterConfig(reporterConfig) + + cwd, err := os.Getwd() + if err != nil { + return err + } + ginkgo.GetSuite().RunSpec(test.spec, ginkgo.Labels{}, "Kubernetes e2e suite", cwd, ginkgo.GetFailer(), ginkgo.GetWriter(), suiteConfig, reporterConfig) + + var summary types.SpecReport + for _, report := range ginkgo.GetSuite().GetReport().SpecReports { + if report.NumAttempts > 0 { + summary = report + } + } + + switch { + case summary.State == types.SpecStatePassed: + // do nothing + case summary.State == types.SpecStateSkipped: + if len(summary.Failure.Message) > 0 { + fmt.Fprintf(opt.ErrOut, "skip [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message) + } + if len(summary.Failure.ForwardedPanic) > 0 { + fmt.Fprintf(opt.ErrOut, "skip [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.ForwardedPanic) + } + return ExitError{Code: 3} + case summary.State == types.SpecStateFailed, summary.State == types.SpecStatePanicked, summary.State == types.SpecStateInterrupted: + if len(summary.Failure.ForwardedPanic) > 0 { + if len(summary.Failure.Location.FullStackTrace) > 0 { + fmt.Fprintf(opt.ErrOut, "\n%s\n", summary.Failure.Location.FullStackTrace) + } + fmt.Fprintf(opt.ErrOut, "fail [%s:%d]: Test Panicked: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.ForwardedPanic) + return ExitError{Code: 1} + } + fmt.Fprintf(opt.ErrOut, "fail [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message) + return ExitError{Code: 1} + default: + return fmt.Errorf("unrecognized test case outcome: %#v", summary) + } + return nil +} + +func (opt *TestOptions) Fail() { + // this function allows us to pass TestOptions as the first argument, + // it's empty because we have the failure-check mechanism implemented above.
+} + +func lastFilenameSegment(filename string) string { + if parts := strings.Split(filename, "/vendor/"); len(parts) > 1 { + return parts[len(parts)-1] + } + if parts := strings.Split(filename, "/src/"); len(parts) > 1 { + return parts[len(parts)-1] + } + return filename +} + +func testsForSuite() []*TestCase { + var tests []*TestCase + + // Don't build the tree multiple times, it results in multiple initing of tests + if !ginkgo.GetSuite().InPhaseBuildTree() { + ginkgo.GetSuite().BuildTree() + } + + ginkgo.GetSuite().WalkTests(func(name string, spec types.TestSpec) { + testCase := &TestCase{ + Name: spec.Text(), + locations: spec.CodeLocations(), + spec: spec, + } + if labels, ok := generated.Annotations[name]; ok { + testCase.Labels = labels + } + tests = append(tests, testCase) + }) + return tests +} diff --git a/openshift-hack/cmd/k8s-tests/types.go b/openshift-hack/cmd/k8s-tests/types.go new file mode 100644 index 0000000000000..29a0b5b5efa4e --- /dev/null +++ b/openshift-hack/cmd/k8s-tests/types.go @@ -0,0 +1,69 @@ +package main + +import ( + "fmt" + + "github.com/onsi/ginkgo/v2/types" +) + +// copied directly from github.com/openshift/origin/test/extended/util/cluster/cluster.go +type ClusterConfiguration struct { + ProviderName string `json:"type"` + + // These fields (and the "type" tag for ProviderName) chosen to match + // upstream's e2e.CloudConfig. + ProjectID string + Region string + Zone string + NumNodes int + MultiMaster bool + MultiZone bool + Zones []string + ConfigFile string + + // Disconnected is set for test jobs without external internet connectivity + Disconnected bool + + // SingleReplicaTopology is set for disabling disruptive tests or tests + // that require high availability + SingleReplicaTopology bool + + // NetworkPlugin is the "official" plugin name + NetworkPlugin string + // NetworkPluginMode is an optional sub-identifier for the NetworkPlugin. + // (Currently it is only used for OpenShiftSDN.) 
+ NetworkPluginMode string `json:",omitempty"` + + // HasIPv4 and HasIPv6 determine whether IPv4-specific, IPv6-specific, + // and dual-stack-specific tests are run + HasIPv4 bool + HasIPv6 bool + + // HasSCTP determines whether SCTP connectivity tests can be run in the cluster + HasSCTP bool + + // IsProxied determines whether we are accessing the cluster through an HTTP proxy + IsProxied bool + + // IsIBMROKS determines whether the cluster is Managed IBM Cloud (ROKS) + IsIBMROKS bool + + // IsNoOptionalCapabilities indicates the cluster has no optional capabilities enabled + HasNoOptionalCapabilities bool +} + +// copied directly from github.com/openshift/origin/pkg/test/ginkgo/test.go +type TestCase struct { + Name string + Labels string + spec types.TestSpec + locations []types.CodeLocation +} + +type ExitError struct { + Code int +} + +func (e ExitError) Error() string { + return fmt.Sprintf("exit with code %d", e.Code) +} diff --git a/openshift-hack/conformance-k8s.sh b/openshift-hack/conformance-k8s.sh new file mode 100755 index 0000000000000..a2680b7f7f270 --- /dev/null +++ b/openshift-hack/conformance-k8s.sh @@ -0,0 +1,96 @@ +#!/bin/bash +# +# Runs the Kubernetes conformance suite against an OpenShift cluster +# +# Test prerequisites: +# +# * all nodes that users can run workloads on are marked as schedulable +# +source "$(dirname "${BASH_SOURCE[0]}")/lib/init.sh" + +# Check inputs +if [[ -z "${KUBECONFIG-}" ]]; then + os::log::fatal "KUBECONFIG must be set to a root account" +fi +test_report_dir="${ARTIFACT_DIR}" +mkdir -p "${test_report_dir}" + +cat <<END > "${test_report_dir}/README.md" +This conformance report is generated by the OpenShift CI infrastructure. The canonical source location for this test script is located at https://github.com/openshift/kubernetes/blob/master/openshift-hack/conformance-k8s.sh + +This file was generated by: + + Commit $( git rev-parse HEAD || "" ) + Tag $( git describe || "" ) + +To recreate these results: + +1. Install an [OpenShift cluster](https://docs.openshift.com/container-platform/) +2. Retrieve a \`.kubeconfig\` file with administrator credentials on that cluster and set the environment variable KUBECONFIG + + export KUBECONFIG=PATH_TO_KUBECONFIG + +3. Clone the OpenShift source repository and change to that directory: + + git clone https://github.com/openshift/kubernetes.git + cd kubernetes + +4. Place the \`oc\` binary for that cluster in your PATH
5. Run the conformance test: + + openshift-hack/conformance-k8s.sh + +Nightly conformance tests are run against release branches and reported at https://openshift-gce-devel.appspot.com/builds/origin-ci-test/logs/periodic-ci-origin-conformance-k8s/ +END + +version="$(sed -rn 's/.*io.openshift.build.versions="kubernetes=(1.[0-9]+.[0-9]+(-rc.[0-9])?)"/v\1/p' openshift-hack/images/hyperkube/Dockerfile.rhel)" +os::log::info "Running Kubernetes conformance suite for ${version}" + +# Execute OpenShift prerequisites +# Disable container security +oc adm policy add-scc-to-group privileged system:authenticated system:serviceaccounts +oc adm policy add-scc-to-group anyuid system:authenticated system:serviceaccounts +unschedulable="$( ( oc get nodes -o name -l 'node-role.kubernetes.io/master'; ) | wc -l )" # count of master nodes, passed to -allowed-not-ready-nodes below +# TODO: undo these operations + +# Execute Kubernetes prerequisites +make WHAT=cmd/kubectl +make WHAT=test/e2e/e2e.test +make WHAT=vendor/github.com/onsi/ginkgo/v2/ginkgo +PATH="${OS_ROOT}/_output/local/bin/$( os::build::host_platform ):${PATH}" +export PATH + +kubectl version > "${test_report_dir}/version.txt" +echo "-----" >> "${test_report_dir}/version.txt" +oc version >> "${test_report_dir}/version.txt" + +# Run the test, serial tests first, then parallel + +rc=0 + +e2e_test="$( which e2e.test )" + +# shellcheck disable=SC2086 +ginkgo \ + -nodes 1 -no-color '-focus=(\[Conformance\].*\[Serial\]|\[Serial\].*\[Conformance\])' \ + ${e2e_test} -- \ + -report-dir "${test_report_dir}" \ + -allowed-not-ready-nodes ${unschedulable} \ + 2>&1 | tee -a "${test_report_dir}/e2e.log" || rc=1 + +rename -v junit_ junit_serial_ "${test_report_dir}"/junit*.xml + +# shellcheck disable=SC2086 +ginkgo \ + --timeout="24h" \ + --output-interceptor-mode=none \ + -nodes 4 -no-color '-skip=\[Serial\]' '-focus=\[Conformance\]' \ + ${e2e_test} -- \ + -report-dir "${test_report_dir}" \ + -allowed-not-ready-nodes ${unschedulable} \ + 2>&1 | tee -a "${test_report_dir}/e2e.log" || rc=1 + +echo +echo "Run complete, results in ${test_report_dir}" + +exit $rc diff --git a/openshift-hack/create-or-update-rebase-branch.sh b/openshift-hack/create-or-update-rebase-branch.sh new file mode 100755 index 0000000000000..c948eb874850a --- /dev/null +++ b/openshift-hack/create-or-update-rebase-branch.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash + +set -o nounset +set -o errexit +set -o pipefail + +# This script is intended to simplify maintaining a rebase branch for +# openshift/kubernetes. +# +# - If the branch named by REBASE_BRANCH does not exist, it will be created by +# branching from UPSTREAM_TAG and merging in TARGET_BRANCH with strategy +# 'ours'. +# +# - If the branch named by REBASE_BRANCH exists, it will be renamed to +# ${REBASE_BRANCH}.<timestamp>, a new branch will be created as per above, and +# carries from the renamed branch will be cherry-picked.
+ +UPSTREAM_TAG="${UPSTREAM_TAG:-}" +if [[ -z "${UPSTREAM_TAG}" ]]; then + echo >&2 "UPSTREAM_TAG is required" + exit 1 +fi + +REBASE_BRANCH="${REBASE_BRANCH:-}" +if [[ -z "${REBASE_BRANCH}" ]]; then + echo >&2 "REBASE_BRANCH is required" + exit 1 +fi + +TARGET_BRANCH="${TARGET_BRANCH:-master}" +if [[ -z "${TARGET_BRANCH}" ]]; then + echo >&2 "TARGET_BRANCH is required" + exit 1 +fi + +echo "Ensuring target branch '${TARGET_BRANCH} is updated" +git co "${TARGET_BRANCH}" +git pull + +echo "Checking if '${REBASE_BRANCH}' exists" +REBASE_IN_PROGRESS= +if git show-ref --verify --quiet "refs/heads/${REBASE_BRANCH}"; then + REBASE_IN_PROGRESS=y +fi + +# If a rebase is in progress, rename the existing branch +if [[ "${REBASE_IN_PROGRESS}" ]]; then + TIMESTAMP="$(date +"%Y-%m-%d_%H-%M-%S")" + PREVIOUS_REBASE_BRANCH="${REBASE_BRANCH}.${TIMESTAMP}" + echo "Renaming rebase branch '${REBASE_BRANCH}' to '${PREVIOUS_REBASE_BRANCH}'" + git br -m "${REBASE_BRANCH}" "${PREVIOUS_REBASE_BRANCH}" +fi + +echo "Branching upstream tag '${UPSTREAM_TAG}' to rebase branch '${REBASE_BRANCH}'" +git co -b "${REBASE_BRANCH}" "${UPSTREAM_TAG}" + +echo "Merging target branch '${TARGET_BRANCH}' to rebase branch '${REBASE_BRANCH}'" +git merge -s ours --no-edit "${TARGET_BRANCH}" + +if [[ "${REBASE_IN_PROGRESS}" ]]; then + echo "Cherry-picking carried commits from previous rebase branch '${PREVIOUS_REBASE_BRANCH}'" + # The first merge in the previous rebase branch should be the point at which + # the target branch was merged with the upstream tag. Any commits since this + # merge should be cherry-picked. + MERGE_SHA="$(git log --pretty=%H --merges --max-count=1 "${PREVIOUS_REBASE_BRANCH}" )" + git cherry-pick "${MERGE_SHA}..${PREVIOUS_REBASE_BRANCH}" +fi diff --git a/openshift-hack/e2e/annotate/annotate.go b/openshift-hack/e2e/annotate/annotate.go new file mode 100644 index 0000000000000..096ae2a00aa96 --- /dev/null +++ b/openshift-hack/e2e/annotate/annotate.go @@ -0,0 +1,290 @@ +package annotate + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "regexp" + "sort" + "strings" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" +) + +var reHasSig = regexp.MustCompile(`\[sig-[\w-]+\]`) + +// Run generates tests annotations for the targeted package. +// It accepts testMaps which defines labeling rules and filter +// function to remove elements based on test name and their labels. +func Run(testMaps map[string][]string, filter func(name string) bool) { + var errors []string + + if len(os.Args) != 2 && len(os.Args) != 3 { + fmt.Fprintf(os.Stderr, "error: requires exactly one argument\n") + os.Exit(1) + } + filename := os.Args[len(os.Args)-1] + + generator := newGenerator(testMaps) + ginkgo.GetSuite().BuildTree() + ginkgo.GetSuite().WalkTests(generator.generateRename) + if len(generator.errors) > 0 { + errors = append(errors, generator.errors...) 
+ } + + renamer := newRenamerFromGenerated(generator.output) + // The generated file has a map[string]string in the following format: + // original k8s test name: labels to append to that name + ginkgo.GetSuite().WalkTests(renamer.updateNodeText) + if len(renamer.missing) > 0 { + var names []string + for name := range renamer.missing { + names = append(names, name) + } + sort.Strings(names) + fmt.Fprintf(os.Stderr, "failed:\n%s\n", strings.Join(names, "\n")) + os.Exit(1) + } + + // All tests must be associated with a sig, whether upstream or downstream. + // If you get this error, you should add the [sig-X] tag to your test (if it's + // in origin) or, if it is upstream, add a new rule to rules.go that assigns + // the test in question to the right sig. + // + // Upstream sigs map to teams (if you have representation on that sig, you + // own those tests in origin) + // Downstream sigs: sig-imageregistry, sig-builds, sig-devex + for from, to := range generator.output { + if !reHasSig.MatchString(from) && !reHasSig.MatchString(to) { + errors = append(errors, fmt.Sprintf("all tests must define a [sig-XXXX] tag or have a rule %q", from)) + } + } + if len(errors) > 0 { + sort.Strings(errors) + for _, s := range errors { + fmt.Fprintf(os.Stderr, "failed: %s\n", s) + } + os.Exit(1) + } + + var pairs []string + for testName, labels := range generator.output { + if filter(fmt.Sprintf("%s%s", testName, labels)) { + continue + } + pairs = append(pairs, fmt.Sprintf("%q:\n%q,", testName, labels)) + } + sort.Strings(pairs) + contents := fmt.Sprintf(` +package generated + +import ( + "fmt" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" +) + +var Annotations = map[string]string{ +%s +} + +func init() { + ginkgo.GetSuite().SetAnnotateFn(func(name string, node types.TestSpec) { + if newLabels, ok := Annotations[name]; ok { + node.AppendText(newLabels) + } else { + panic(fmt.Sprintf("unable to find test %%s", name)) + } + }) +} +`, strings.Join(pairs, "\n\n")) + if err := ioutil.WriteFile(filename, []byte(contents), 0644); err != nil { + fmt.Fprintf(os.Stderr, "error: %v", err) + os.Exit(1) + } + if _, err := exec.Command("gofmt", "-s", "-w", filename).Output(); err != nil { + fmt.Fprintf(os.Stderr, "error: %v", err) + os.Exit(1) + } +} + +func newGenerator(testMaps map[string][]string) *ginkgoTestRenamer { + var allLabels []string + matches := make(map[string]*regexp.Regexp) + stringMatches := make(map[string][]string) + + for label, items := range testMaps { + sort.Strings(items) + allLabels = append(allLabels, label) + var remain []string + for _, item := range items { + re := regexp.MustCompile(item) + if p, ok := re.LiteralPrefix(); ok { + stringMatches[label] = append(stringMatches[label], p) + } else { + remain = append(remain, item) + } + } + if len(remain) > 0 { + matches[label] = regexp.MustCompile(strings.Join(remain, `|`)) + } + } + sort.Strings(allLabels) + + excludedTestsFilter := regexp.MustCompile(strings.Join(ExcludedTests, `|`)) + + return &ginkgoTestRenamer{ + allLabels: allLabels, + stringMatches: stringMatches, + matches: matches, + excludedTestsFilter: excludedTestsFilter, + output: make(map[string]string), + } +} + +func newRenamerFromGenerated(names map[string]string) *ginkgoTestRenamer { + return &ginkgoTestRenamer{ + output: names, + missing: make(map[string]struct{}), + } +} + +type ginkgoTestRenamer struct { + // keys defined in TestMaps in openshift-hack/e2e/annotate/rules.go + allLabels []string + // exact substrings to match to apply a particular label + 
stringMatches map[string][]string + // regular expressions to match to apply a particular label + matches map[string]*regexp.Regexp + // regular expression that permanently excludes a set of tests; + // see ExcludedTests in openshift-hack/e2e/annotate/rules.go + excludedTestsFilter *regexp.Regexp + + // output from generateRename and also input for updateNodeText + output map[string]string + // map of unmatched test names + missing map[string]struct{} + // a list of errors to display + errors []string +} + +func (r *ginkgoTestRenamer) updateNodeText(name string, node types.TestSpec) { + if newLabels, ok := r.output[name]; ok { + node.AppendText(newLabels) + } else { + r.missing[name] = struct{}{} + } +} + +func (r *ginkgoTestRenamer) generateRename(name string, node types.TestSpec) { + newLabels := "" + newName := name + for { + count := 0 + for _, label := range r.allLabels { + // never apply a sig label twice + if strings.HasPrefix(label, "[sig-") && strings.Contains(newName, "[sig-") { + continue + } + if strings.Contains(newName, label) { + continue + } + + var hasLabel bool + for _, segment := range r.stringMatches[label] { + hasLabel = strings.Contains(newName, segment) + if hasLabel { + break + } + } + if !hasLabel { + if re := r.matches[label]; re != nil { + hasLabel = re.MatchString(newName) + } + } + + if hasLabel { + count++ + newLabels += " " + label + newName += " " + label + } + } + if count == 0 { + break + } + } + + // Append suite name to test, if it doesn't already have one + if !r.excludedTestsFilter.MatchString(newName) && !strings.Contains(newName, "[Suite:") { + isSerial := strings.Contains(newName, "[Serial]") + isConformance := strings.Contains(newName, "[Conformance]") + switch { + case isSerial && isConformance: + newLabels += " [Suite:openshift/conformance/serial/minimal]" + case isSerial: + newLabels += " [Suite:openshift/conformance/serial]" + case isConformance: + newLabels += " [Suite:openshift/conformance/parallel/minimal]" + default: + newLabels += " [Suite:openshift/conformance/parallel]" + } + } + codeLocations := node.CodeLocations() + if isGoModulePath(codeLocations[len(codeLocations)-1].FileName, "k8s.io/kubernetes", "test/e2e") { + newLabels += " [Suite:k8s]" + } + + if err := checkBalancedBrackets(newName); err != nil { + r.errors = append(r.errors, err.Error()) + } + r.output[name] = newLabels +} + +// isGoModulePath returns true if the packagePath reported by reflection is within a +// module and given module path. When go mod is in use, module and modulePath are not +// contiguous as they were in older Go versions with vendoring, so naive substring +// checks fail. +// +// historically: ".../vendor/k8s.io/kubernetes/test/e2e" +// go.mod: "k8s.io/kubernetes@0.18.4/test/e2e" +func isGoModulePath(packagePath, module, modulePath string) bool { + return regexp.MustCompile(fmt.Sprintf(`\b%s(@[^/]*|)/%s\b`, regexp.QuoteMeta(module), regexp.QuoteMeta(modulePath))).MatchString(packagePath) +} + +// checkBalancedBrackets ensures that square brackets are balanced in generated test +// names. If they are not, it returns an error with the name of the test and a guess +// where the unmatched bracket(s) are.
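+// +// For example (illustrative name, not a real test), checking +// "[sig-storage] Test [Driver:azure" +// returns an error whose message places a caret under the '[' of the +// unmatched "[Driver:azure".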
+func checkBalancedBrackets(testName string) error { + stack := make([]int, 0, len(testName)) + for idx, c := range testName { + if c == '[' { + stack = append(stack, idx) + } else if c == ']' { + // case when we start off with a ] + if len(stack) == 0 { + stack = append(stack, idx) + } else { + stack = stack[:len(stack)-1] + } + } + } + + if len(stack) > 0 { + msg := testName + "\n" + outerLoop: + for i := 0; i < len(testName); i++ { + for _, loc := range stack { + if i == loc { + msg += "^" + continue outerLoop + } + } + msg += " " + } + return fmt.Errorf("unbalanced brackets in test name:\n%s\n", msg) + } + + return nil +} diff --git a/openshift-hack/e2e/annotate/annotate_test.go b/openshift-hack/e2e/annotate/annotate_test.go new file mode 100644 index 0000000000000..614c902e29e44 --- /dev/null +++ b/openshift-hack/e2e/annotate/annotate_test.go @@ -0,0 +1,55 @@ +package annotate + +import ( + "fmt" + "os" + "testing" +) + +func Test_checkBalancedBrackets(t *testing.T) { + tests := []struct { + testCase string + testName string + wantErr bool + }{ + { + testCase: "balanced brackets succeeds", + testName: "[sig-storage] Test that storage [apigroup:storage.openshift.io] actually works [Driver:azure][Serial][Late]", + wantErr: false, + }, + { + testCase: "unbalanced brackets errors", + testName: "[sig-storage] Test that storage [apigroup:storage.openshift.io actually works [Driver:azure][Serial][Late]", + wantErr: true, + }, + { + testCase: "start with close bracket errors", + testName: "[sig-storage] test with a random bracket ]", + wantErr: true, + }, + { + testCase: "multiple unbalanced brackets errors", + testName: "[sig-storage Test that storage [apigroup:storage.openshift.io actually works [Driver:azure]", + wantErr: true, + }, + { + testCase: "balanced deeply nested brackets succeeds", + testName: "[[[[[[some weird test with deeply nested brackets]]]]]]", + wantErr: false, + }, + { + testCase: "unbalanced deeply nested brackets errors", + testName: "[[[[[[some weird test with deeply nested brackets]]]]]", + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.testCase, func(t *testing.T) { + if err := checkBalancedBrackets(tt.testName); (err != nil) != tt.wantErr { + t.Errorf("checkBalancedBrackets() error = %v, wantErr %v", err, tt.wantErr) + } else if err != nil { + fmt.Fprintf(os.Stderr, "checkBalancedBrackets() success, found expected err = \n%s\n", err.Error()) + } + }) + } +} diff --git a/openshift-hack/e2e/annotate/cmd/main.go b/openshift-hack/e2e/annotate/cmd/main.go new file mode 100644 index 0000000000000..c1666ce9e045b --- /dev/null +++ b/openshift-hack/e2e/annotate/cmd/main.go @@ -0,0 +1,9 @@ +package main + +import ( + "k8s.io/kubernetes/openshift-hack/e2e/annotate" +) + +func main() { + annotate.Run(annotate.TestMaps, func(name string) bool { return false }) +} diff --git a/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go b/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go new file mode 100644 index 0000000000000..f1d47c01428be --- /dev/null +++ b/openshift-hack/e2e/annotate/generated/zz_generated.annotations.go @@ -0,0 +1,14996 @@ +package generated + +import ( + "fmt" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" +) + +var Annotations = map[string]string{ + "[sig-api-machinery] API Streaming (aka. 
WatchList) [Serial] [Feature:WatchList] should be requested when ENABLE_CLIENT_GO_WATCH_LIST_ALPHA is set": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-api-machinery] API priority and fairness should ensure that requests can be classified by adding FlowSchema and PriorityLevelConfiguration": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (fairness)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (priority)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] API priority and fairness should support FlowSchema API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] API priority and fairness should support PriorityLevelConfiguration API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing mutating webhooks should work [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing validating webhooks should work [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a mutating webhook should work [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a validating webhook should work [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to create and update mutating webhook configurations with match conditions": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to create and update validating webhook configurations with match conditions": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny attaching pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny custom resource creation, update and deletion [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny pod and configmap creation [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should deny crd creation [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should honor timeout [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should include webhook resources in discovery documents [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate configmap [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + 
"[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with different stored version [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with pruning [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate everything except 'skip-me' configmaps": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate pod and apply defaults after mutation [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should reject mutating webhook configurations with invalid match conditions": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should reject validating webhook configurations with invalid match conditions": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should unconditionally reject operations on fail closed webhook [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Aggregator Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert a non homogeneous list of CRs [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert from CR v1 to CR v2 [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] CustomResourceDefinition Watch watch on custom resource definition objects [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition creating/deleting custom resource definition objects works [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition getting/updating/patching custom resource definition status sub-resource works [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition listing custom resource definition objects works [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] custom resource defaulting for requests 
and from storage works [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] should include custom resource definition resources in discovery documents [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] [Flaky] kubectl explain works for CR with the same resource name as built-in object.": " [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] removes definition from spec when one version gets changed to not be served [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] updates the published spec when one version gets renamed [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields at the schema root [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields in an embedded object [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD with validation schema [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD without validation schema [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of different groups [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group and version but different kinds [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group but different versions [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST NOT fail validation for create of a custom resource that satisfies the x-kubernetes-validations rules": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource definition that contains a x-kubernetes-validations rule that refers to a property that do not exist": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource definition that contains an x-kubernetes-validations rule that contains a syntax error": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource definition that contains an x-kubernetes-validations rule that exceeds the estimated cost limit": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] 
CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource that exceeds the runtime cost limit for x-kubernetes-validations rule execution": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail update of a custom resource that does not satisfy a x-kubernetes-validations transition rule": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail validation for create of a custom resource that does not satisfy the x-kubernetes-validations rules": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Discovery Custom resource should have storage version hash": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Discovery should accurately determine present and missing resources": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Discovery should locate the groupVersion and a resource within each APIGroup [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Discovery should validate PreferredVersion for each APIGroup [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Etcd failure [Disruptive] should recover from SIGKILL": " [Serial] [Suite:k8s]", + + "[sig-api-machinery] Etcd failure [Disruptive] should recover from network partition with master": " [Serial] [Suite:k8s]", + + "[sig-api-machinery] FieldValidation should create/apply a CR with unknown fields for CRD with no validation schema [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] FieldValidation should create/apply a valid CR for CRD with validation schema [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] FieldValidation should create/apply an invalid CR with extra properties for CRD with validation schema [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] FieldValidation should detect duplicates in a CR when preserving unknown fields [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] FieldValidation should detect unknown and duplicate fields of a typed object [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] FieldValidation should detect unknown metadata fields in both the root and embedded object of a CR [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] FieldValidation should detect unknown metadata fields of a typed object [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should delete RS created by deployment when not orphaning [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should delete jobs and pods created by cronjob": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should delete pods created by rc when not orphaning [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should keep the rc around until all its pods are deleted if the deleteOptions says so 
[Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should not be blocked by dependency circle [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should orphan pods created by rc if delete options say so [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should orphan pods created by rc if deleteOptions.OrphanDependents is nil": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should support cascading deletion of custom resources": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Garbage collector should support orphan deletion of custom resources": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Generated clientset should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Generated clientset should create v1 cronJobs, delete cronJobs, watch cronJobs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Namespaces [Serial] should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-api-machinery] Namespaces [Serial] should apply a finalizer to a Namespace [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-api-machinery] Namespaces [Serial] should apply an update to a Namespace [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-api-machinery] Namespaces [Serial] should apply changes to a namespace status [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-api-machinery] Namespaces [Serial] should delete fast enough (90 percent of 100 namespaces in 150 seconds)": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-api-machinery] Namespaces [Serial] should ensure that all pods are removed when a namespace is deleted [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-api-machinery] Namespaces [Serial] should ensure that all services are removed when a namespace is deleted [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-api-machinery] Namespaces [Serial] should patch a Namespace [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-api-machinery] OpenAPIV3 should contain OpenAPI V3 for Aggregated APIServer": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] OpenAPIV3 should publish OpenAPI V3 for CustomResourceDefinition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] OpenAPIV3 should round trip OpenAPI V3 for all built-in group versions": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota [Feature:ScopeSelectors] should verify ResourceQuota with best effort scope using scope-selectors.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota [Feature:ScopeSelectors] should verify ResourceQuota with terminating scopes through scope selectors.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should apply changes to a resourcequota status [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should be able to update and delete ResourceQuota. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a configMap. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a custom resource.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim with a storage class": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a pod. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replica set. 
[Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replication controller. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a secret. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a service. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should manage the lifecycle of a ResourceQuota [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should verify ResourceQuota with best effort scope. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should verify ResourceQuota with cross namespace pod affinity scope using scope-selectors.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ResourceQuota should verify ResourceQuota with terminating scopes. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Server request timeout default timeout should be used if the specified timeout in the request URL is 0s": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Server request timeout should return HTTP status code 400 if the user specifies an invalid timeout in the request URL": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Server request timeout the request should be served with a default timeout if the specified timeout in the request URL exceeds maximum allowed": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ServerSideApply should create an applied object if it does not already exist": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ServerSideApply should give up ownership of a field if forced applied by a controller": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ServerSideApply should ignore conflict errors if force apply is used": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ServerSideApply should not remove a field if an owner unsets the field but other managers still have ownership of the field": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ServerSideApply should remove a field if it is owned but removed in the apply request": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ServerSideApply should work for CRDs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ServerSideApply should work for subresources": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Servers with support for API chunking should return chunks of results for list calls [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Servers with support for API chunking should support continue listing from the last key if the 
original version has been compacted away, though the list is inconsistent [Slow] [Conformance]": " [Suite:k8s]", + + "[sig-api-machinery] Servers with support for Table transformation should return a 406 for a backend which does not implement metadata [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Servers with support for Table transformation should return chunks of table results for list calls": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Servers with support for Table transformation should return generic metadata details across all namespaces for nodes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Servers with support for Table transformation should return pod details": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] StorageVersion resources [Feature:StorageVersionAPI] storage version with non-existing id should be GC'ed": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] [FeatureGate:ValidatingAdmissionPolicy] [Beta] should allow expressions to refer variables.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] [FeatureGate:ValidatingAdmissionPolicy] [Beta] should type check a CRD": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] [FeatureGate:ValidatingAdmissionPolicy] [Beta] should type check validation expressions": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] [FeatureGate:ValidatingAdmissionPolicy] [Beta] should validate against a Deployment": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] Watchers should be able to restart watching from the last resource version observed by the previous watch [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Watchers should be able to start watching from a specific resource version [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Watchers should observe add, update, and delete watch notifications on configmaps [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Watchers should observe an object deletion if it stops meeting the requirements of the selector [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] Watchers should receive events on concurrent watches in same order [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/json,application/vnd.kubernetes.protobuf\"": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/json\"": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/vnd.kubernetes.protobuf,application/json\"": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/vnd.kubernetes.protobuf\"": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] health handlers should contain necessary checks": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-api-machinery] kube-apiserver identity [Feature:APIServerIdentity] kube-apiserver identity should persist after restart [Disruptive]": " [Serial] [Suite:k8s]", + + "[sig-api-machinery] server version should find the server version [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] ControllerRevision [Serial] should manage the lifecycle of a ControllerRevision [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-apps] CronJob should be able to schedule after more than 100 missed schedule": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] CronJob should delete failed finished jobs with limit of one job": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] CronJob should delete successful finished jobs with limit of one successful job": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] CronJob should not emit unexpected warnings": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] CronJob should not schedule jobs when suspended [Slow] [Conformance]": " [Suite:k8s]", + + "[sig-apps] CronJob should not schedule new jobs when ForbidConcurrent [Slow] [Conformance]": " [Suite:k8s]", + + "[sig-apps] CronJob should remove from active list jobs that have been deleted": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] CronJob should replace jobs when ReplaceConcurrent [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] CronJob should schedule multiple jobs concurrently [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] CronJob should support CronJob API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] CronJob should support timezone": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should list and delete a collection of DaemonSets [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should not update pod when spec was updated and update strategy is OnDelete": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should retry creating failed daemon pods [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should rollback without unnecessary restarts [Conformance]": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should run and stop complex daemon [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should run and stop complex daemon with node affinity": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should run and stop simple daemon [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should surge pods onto nodes when spec was updated and update strategy is RollingUpdate": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should update pod when spec was updated and update strategy is RollingUpdate [Conformance]": " 
[Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-apps] Daemon set [Serial] should verify changes to a daemon set status [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-apps] DaemonRestart [Disruptive] Controller Manager should not create/delete replicas across restart": " [Serial] [Suite:k8s]", + + "[sig-apps] DaemonRestart [Disruptive] Kube-proxy should recover after being killed accidentally": " [Serial] [Suite:k8s]", + + "[sig-apps] DaemonRestart [Disruptive] Kubelet should not restart containers across restart": " [Serial] [Suite:k8s]", + + "[sig-apps] DaemonRestart [Disruptive] Scheduler should continue assigning pods to nodes across restart": " [Serial] [Suite:k8s]", + + "[sig-apps] Deployment Deployment should have a working scale subresource [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Deployment RecreateDeployment should delete old pods and create new ones [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Deployment RollingUpdateDeployment should delete old pods and create new ones [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Deployment deployment reaping should cascade to its replica sets and pods": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Deployment deployment should delete old replica sets [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Deployment deployment should support proportional scaling [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Deployment deployment should support rollover [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Deployment iterative rollouts should eventually progress": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Deployment should not disrupt a cloud load-balancer's connectivity during rollout": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Deployment should run the lifecycle of a Deployment [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Deployment should validate Deployment Status endpoints [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Deployment test Deployment ReplicaSet orphaning and adoption regarding controllerRef": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController Listing PodDisruptionBudgets for all namespaces should list and delete a collection of PodDisruptionBudgets [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] DisruptionController evictions: enough pods, absolute => should allow an eviction": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController evictions: enough pods, replicaSet, percentage => should allow an eviction": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController evictions: maxUnavailable allow single eviction, percentage => should allow an eviction": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController evictions: maxUnavailable deny evictions, integer => should not allow an eviction [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-apps] DisruptionController evictions: no PDB => should allow an 
eviction": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController evictions: too few pods, absolute => should not allow an eviction": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController evictions: too few pods, replicaSet, percentage => should not allow an eviction [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-apps] DisruptionController should block an eviction until the PDB is updated to allow it [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] DisruptionController should create a PodDisruptionBudget [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] DisruptionController should observe PodDisruptionBudget status updated [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] DisruptionController should observe that the PodDisruptionBudget status is not updated for unmanaged pods": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] DisruptionController should update/patch PodDisruptionBudget status [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Job Using a pod failure policy to not count some failures towards the backoffLimit Ignore DisruptionTarget condition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job Using a pod failure policy to not count some failures towards the backoffLimit Ignore exit code 137": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should adopt matching orphans and release non-matching pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Job should allow to use the pod failure policy on exit code to fail the job early": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should allow to use the pod failure policy to not count the failure towards the backoffLimit": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should apply changes to a job status [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Job should create pods for an Indexed job with completion indexes and specified hostname [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Job should delete a job [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Job should delete pods when suspended": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should execute all indexes despite some failing when using backoffLimitPerIndex": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should fail to exceed backoffLimit": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should fail when exceeds active deadline": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should manage the lifecycle of a job [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Job should mark indexes as failed when the FailIndex action is matched in podFailurePolicy": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should not create pods when created in suspend state": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should recreate pods only after they have failed if pod replacement 
policy is set to Failed": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should remove pods when job is deleted": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should run a job to completion when tasks sometimes fail and are locally restarted [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] Job should run a job to completion when tasks sometimes fail and are not locally restarted": " [Flaky] [Suite:k8s]", + + "[sig-apps] Job should run a job to completion when tasks succeed": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should run a job to completion with CPU requests [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-apps] Job should terminate job execution when the number of failed indexes exceeds maxFailedIndexes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] Job should update the status ready field": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] ReplicaSet Replace and Patch tests [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] ReplicaSet Replicaset should have a working scale subresource [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] ReplicaSet should adopt matching pods on creation and release no longer matching pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] ReplicaSet should list and delete a collection of ReplicaSets [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] ReplicaSet should serve a basic image on each replica with a private image": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] ReplicaSet should serve a basic image on each replica with a public image [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] ReplicaSet should surface a failure condition on a common issue like exceeded quota": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] ReplicaSet should validate Replicaset Status endpoints [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] ReplicationController should adopt matching pods on creation [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] ReplicationController should get and update a ReplicationController scale [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] ReplicationController should release no longer matching pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] ReplicationController should serve a basic image on each replica with a private image": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] ReplicationController should serve a basic image on each replica with a public image [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] ReplicationController should surface a failure condition on a common issue like exceeded quota [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] ReplicationController should test the lifecycle of a ReplicationController [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] StatefulSet Automatically recreate PVC for pending pod 
when PVC is missing PVC should be recreated when pod is pending due to missing PVC [Disruptive] [Serial]": " [Suite:k8s]", + + "[sig-apps] StatefulSet AvailableReplicas should get updated accordingly when MinReadySeconds is enabled": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance]": " [Suite:k8s]", + + "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance]": " [Suite:k8s]", + + "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Should recreate evicted statefulset [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should adopt matching orphans and release non-matching pods": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should have a working scale subresource [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should implement legacy replacement when the update strategy is OnDelete": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should list, patch and delete a collection of StatefulSets [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should not deadlock when a pod's predecessor fails": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications for partiton1 and delete pod-0 with failing container": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications for partiton1 and delete pod-0 without failing container": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications with PVCs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should provide basic identity": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should validate Statefulset Status endpoints [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-apps] StatefulSet Deploy clustered applications 
[Feature:StatefulSet] [Slow] should creating a working CockroachDB cluster": " [Suite:k8s]", + + "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working mysql cluster": " [Suite:k8s]", + + "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working redis cluster": " [Suite:k8s]", + + "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working zookeeper cluster": " [Suite:k8s]", + + "[sig-apps] StatefulSet MinReadySeconds should be honored when enabled": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs after adopting pod (WhenDeleted)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs after adopting pod (WhenScaled)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs with a OnScaledown policy": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs with a WhenDeleted policy": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Decreasing .start.ordinal": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Increasing .start.ordinal": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Removing .start.ordinal": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Setting .start.ordinal": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] TTLAfterFinished job should be deleted once it finishes after TTL seconds": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-apps] stateful Upgrade [Feature:StatefulUpgrade] stateful upgrade should maintain a functioning cluster": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-architecture] Conformance Tests should have at least two untainted nodes [Conformance]": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-auth] Certificates API [Privileged:ClusterAdmin] should support CSR API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-auth] Certificates API [Privileged:ClusterAdmin] should support building a client with a CSR": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-auth] SelfSubjectReview should support SelfSubjectReview API operations": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-auth] SelfSubjectReview testing SSR in different API groups authentication/v1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-auth] SelfSubjectReview testing SSR in different API groups authentication/v1beta1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-auth] ServiceAccount admission controller migration [Feature:BoundServiceAccountTokenVolume] master upgrade should maintain a functioning cluster": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-auth] ServiceAccounts ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer 
[Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-auth] ServiceAccounts no secret-based service account token should be auto-generated": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-auth] ServiceAccounts should allow opting out of API token automount [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-auth] ServiceAccounts should guarantee kube-root-ca.crt exist in any namespace [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-auth] ServiceAccounts should mount an API token into pods [Conformance]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-auth] ServiceAccounts should mount projected service account token [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-auth] ServiceAccounts should run through the lifecycle of a ServiceAccount [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-auth] ServiceAccounts should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow]": " [Suite:k8s]", + + "[sig-auth] ServiceAccounts should update a ServiceAccount [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-auth] SubjectReview should support SubjectReview API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-auth] [Feature:ClusterTrustBundle] [Feature:ClusterTrustBundleProjection] should be able to mount a single ClusterTrustBundle by name": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-auth] [Feature:NodeAuthenticator] The kubelet can delegate ServiceAccount tokens to the API server": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-auth] [Feature:NodeAuthenticator] The kubelet's main port 10250 should reject requests with no credentials": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-auth] [Feature:NodeAuthorizer] A node shouldn't be able to create another node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-auth] [Feature:NodeAuthorizer] A node shouldn't be able to delete another node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-auth] [Feature:NodeAuthorizer] Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-auth] [Feature:NodeAuthorizer] Getting a non-existent secret should exit with the Forbidden error, not a NotFound error": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-auth] [Feature:NodeAuthorizer] Getting a secret for a workload the node has access to should succeed": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-auth] [Feature:NodeAuthorizer] Getting an existing configmap should exit with the Forbidden error": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-auth] [Feature:NodeAuthorizer] Getting an existing secret should exit with the Forbidden error": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaler scalability [Slow] CA ignores unschedulable pods while scheduling schedulable pods 
[Feature:ClusterAutoscalerScalability6]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale down empty nodes [Feature:ClusterAutoscalerScalability3]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale up at all [Feature:ClusterAutoscalerScalability1]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale up twice [Feature:ClusterAutoscalerScalability2]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaler scalability [Slow] shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0 [Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group up from 0 [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] Should not scale GPU pool up if pod does not require GPUs [GpuType:] [Feature:ClusterSizeAutoscalingGpu]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] Should scale down GPU pool from 1 [GpuType:] [Feature:ClusterSizeAutoscalingGpu]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] Should scale up GPU pool from 0 [GpuType:] [Feature:ClusterSizeAutoscalingGpu]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] Should scale up GPU pool from 1 [GpuType:] [Feature:ClusterSizeAutoscalingGpu]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining multiple pods one by one as dictated by pdb [Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb [Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down when rescheduling a pod is required and pdb allows for it [Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should correctly scale down after a node is not needed when there is non autoscaled pool [Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should 
increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain [Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] DNS horizontal autoscaling [Serial] [Slow] [KubeUp] kube-dns-autoscaler should scale kube-dns pods when cluster size changed": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-autoscaling] DNS horizontal autoscaling kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-autoscaling] [Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling Autoscaling a service from 1 pod and 3 nodes to 8 pods and >=4 nodes takes less than 15 minutes": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) CustomResourceDefinition Should scale with a CRD targetRef": " [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 1 pod to 2 pods": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light [Slow] Should scale from 2 pods to 1 pod": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod using Average Utilization for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should not scale up on a busy sidecar with an idle application": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods on a busy application with an idle sidecar container": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods and verify decision stability": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod and verify decision stability": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for 
aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale down": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale up": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range over two stabilization windows": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range with stabilization window and pod limit rate": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with long upscale stabilization window should scale up only after the stabilization period": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale down no more than given number of Pods per minute": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale up no more than given number of Pods per minute": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale down no more than given percentage of current Pods per minute": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale up no more than given percentage of current Pods per minute": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with short downscale stabilization window should scale down soon after the stabilization period": " [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Object from Stackdriver should scale down to 0": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Object from Stackdriver should scale down": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Pod from Stackdriver should scale down with Prometheus": " [Skipped:gce] [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Pod from Stackdriver should scale down": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Pod from Stackdriver should scale up with two metrics": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with External Metric from Stackdriver should scale down with target average value": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with External Metric from Stackdriver should scale down with target value": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with External Metric from Stackdriver should scale up with two metrics": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should not scale down when one metric is missing (Container Resource and External Metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should not scale down when one metric is missing (Pod and Object Metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should scale up when one metric is missing (Pod and External metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should scale up when one metric is missing (Resource and Object metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 should support forwarding over websockets": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects NO client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that 
connects, sends NO DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl Port forwarding With a server listening on localhost should support forwarding over websockets": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects NO client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends NO DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Guestbook application should create and stop a working application [Conformance]": " [Slow] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl api-versions should check if v1 is in available api versions [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl apply apply set/view last-applied": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl apply should apply a new configuration to an existing RC": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl apply should reuse port when apply to an existing SVC": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl cluster-info dump should check if cluster-info dump succeeds": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes control plane services is included in cluster-info [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl copy should copy a file from a running Pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl create quota should create a quota with scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl create quota should create a quota without scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl create quota should reject quota with invalid scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for cronjob": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl diff should check if kubectl diff finds a difference for Deployments [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl events should show event when pod is created": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl expose should create services for rc [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client 
Kubectl get componentstatuses should get componentstatuses": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl label should update the label on a resource [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl patch should add annotations for pods in rc [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl prune with applyset should apply and prune objects": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl replace should update a single-container pod's image [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl run pod should create a pod from an image when restart is Never [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl server-side dry-run should check if kubectl can dry-run update Pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl taint [Serial] should remove all the taints with the same key off a node": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl taint [Serial] should update the taint on a node": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl validation should create/apply a CR with unknown fields for CRD with no validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl validation should create/apply a valid CR for CRD with validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl validation should create/apply an invalid/valid CR with arbitrary-extra properties for CRD with partially-specified validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields in both the root and embedded object of a CR": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields of a typed object": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl version should check is all data is printed [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Proxy server should support --unix-socket=/path [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Proxy server should support proxy with --port 0 [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod Kubectl run [Slow] running a failing command with --leave-stdin-open": " [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod Kubectl run [Slow] running a failing command without --restart=Never": " [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod Kubectl run [Slow] running a failing command without --restart=Never, but with --rm": " [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod Kubectl run running a failing command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod Kubectl run running a successful command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl 
client Simple pod should contain last line of the log": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a failing command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a successful command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should return command exit codes should handle in-cluster config": " [Disabled:Broken] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should return command exit codes should support port-forward": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should support exec through an HTTP proxy": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should support exec through kubectl proxy": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should support exec using resource/name": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should support exec": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should support inline execution and attach with websockets or fallback to spdy": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should support inline execution and attach": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Update Demo should create and stop a replication controller [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client kubectl subresource flag GET on status subresource of built-in type (node) returns identical info as GET on the built-in type": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client kubectl subresource flag should not be used in a bulk GET": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client kubectl wait should ignore not found error with --for=delete": " [Disabled:Broken] [Suite:k8s]", + + "[sig-cli] Kubectl logs default container logs the second container is the default-container by annotation should log default container if not specified": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl logs logs should be able to retrieve and filter logs [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl rollout undo undo should rollback and update deployment env": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Downgrade [Feature:Downgrade] cluster downgrade should maintain a functioning cluster [Feature:ClusterDowngrade]": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-cloud-provider-gcp] GKE node pools [Feature:GKENodePool] should create a cluster with multiple node pools [Feature:GKENodePool]": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-cloud-provider-gcp] HA-master [Feature:HAMaster] survive addition/removal replicas different zones [Serial] [Disruptive]": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-cloud-provider-gcp] 
HA-master [Feature:HAMaster] survive addition/removal replicas multizone workers [Serial] [Disruptive]": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-cloud-provider-gcp] HA-master [Feature:HAMaster] survive addition/removal replicas same zone [Serial] [Disruptive]": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Nodes [Disruptive] Resize [Slow] should be able to add nodes": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Nodes [Disruptive] Resize [Slow] should be able to delete nodes": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not be able to proxy to cadvisor port 4194 using proxy subresource": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not be able to proxy to the readonly kubelet port 10255 using proxy subresource": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not have port 10255 open on its all public IP addresses": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not have port 4194 open on its all public IP addresses": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by dropping all inbound packets for a while and ensure they function afterwards": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by dropping all outbound packets for a while and ensure they function afterwards": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by ordering clean reboot and ensure they function upon restart": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by ordering unclean reboot and ensure they function upon restart": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by switching off the network interface and ensure they function upon switch on": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by triggering kernel panic and ensure they function upon restart": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Recreate [Feature:Recreate] recreate nodes and ensure they function upon restart": " [Disabled:Broken] [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Restart [Disruptive] [KubeUp] should restart all nodes and ensure all nodes and pods recover": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Upgrade [Feature:Upgrade] cluster upgrade should maintain a functioning cluster [Feature:ClusterUpgrade]": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-cloud-provider-gcp] Upgrade [Feature:Upgrade] master upgrade should maintain a functioning cluster [Feature:MasterUpgrade]": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-cloud-provider-gcp] [Disruptive] NodeLease NodeLease deletion node lease should be deleted when corresponding node is deleted": " [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + + "[sig-cloud-provider] [Feature:CloudProvider] [Disruptive] Nodes should be deleted on 
API server if it doesn't exist in the cloud provider": " [Serial] [Suite:k8s]", + + "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should delete the signed bootstrap tokens from clusterInfo ConfigMap when bootstrap token is deleted": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should delete the token secret when the secret expired": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should not delete the token secret when the secret is not expired": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should resign the bootstrap tokens when the clusterInfo ConfigMap updated [Serial] [Disruptive]": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should sign the new added bootstrap tokens": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-instrumentation] Events API should delete a collection of events [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-instrumentation] Events API should ensure that an event can be fetched, patched, deleted, and listed [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-instrumentation] Events should delete a collection of events [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-instrumentation] Events should manage the lifecycle of an event [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-instrumentation] Logging soak [Performance] [Slow] [Disruptive] should survive logging 1KB every 1s seconds, for a duration of 2m0s": " [Serial] [Suite:k8s]", + + "[sig-instrumentation] Metrics should grab all metrics from kubelet /metrics/resource endpoint": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-instrumentation] MetricsGrabber should grab all metrics from API server.": " [Disabled:Broken] [Suite:k8s]", + + "[sig-instrumentation] MetricsGrabber should grab all metrics from a ControllerManager.": " [Disabled:Broken] [Suite:k8s]", + + "[sig-instrumentation] MetricsGrabber should grab all metrics from a Kubelet.": " [Disabled:Broken] [Suite:k8s]", + + "[sig-instrumentation] MetricsGrabber should grab all metrics from a Scheduler.": " [Disabled:Broken] [Suite:k8s]", + + "[sig-instrumentation] MetricsGrabber should grab all metrics slis from API server.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-instrumentation] Stackdriver Monitoring should have accelerator metrics [Feature:StackdriverAcceleratorMonitoring]": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-instrumentation] Stackdriver Monitoring should have cluster metrics [Feature:StackdriverMonitoring]": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-instrumentation] Stackdriver Monitoring should run Custom Metrics - Stackdriver Adapter for external metrics [Feature:StackdriverExternalMetrics]": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-instrumentation] Stackdriver Monitoring should run Custom Metrics - Stackdriver Adapter for new resource model [Feature:StackdriverCustomMetrics]": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-instrumentation] Stackdriver Monitoring should run Custom Metrics - Stackdriver Adapter for old resource model [Feature:StackdriverCustomMetrics]": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-instrumentation] Stackdriver Monitoring should run Stackdriver Metadata Agent 
[Feature:StackdriverMetadataAgent]": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-network] CVE-2021-29923 IPv4 Service Type ClusterIP with leading zeros should work interpreted as decimal": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] ClusterDns [Feature:Example] should create pod that uses dns": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] Connectivity Pod Lifecycle should be able to connect from a Pod to a terminating Pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Connectivity Pod Lifecycle should be able to connect to other Pod from a terminating Pod": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-network] Conntrack should be able to preserve UDP traffic when initial unready endpoints get ready": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a ClusterIP service and client is hostNetwork": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a ClusterIP service": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a NodePort service": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Conntrack should drop INVALID conntrack entries [Privileged]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] DNS HostNetwork should resolve DNS of partial qualified names for services on hostNetwork pods with dnsPolicy: ClusterFirstWithHostNet [LinuxOnly]": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-network] DNS configMap nameserver Change stubDomain should be able to change stubDomain configuration [Slow] [Serial]": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-network] DNS configMap nameserver Forward PTR lookup should forward PTR records lookup to upstream nameserver [Slow] [Serial]": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-network] DNS configMap nameserver Forward external name lookup should forward externalname lookup to upstream nameserver [Slow] [Serial]": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] DNS should provide DNS for ExternalName services [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] DNS should provide DNS for pods for Hostname [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] DNS should provide DNS for pods for Subdomain [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] DNS should provide DNS for services [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] DNS should provide DNS for the cluster [Conformance]": " [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] DNS should provide DNS for the cluster [Provider:GCE]": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] DNS should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance]": " [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] DNS should resolve DNS of partial qualified names 
for the cluster [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] DNS should support configurable pod DNS nameservers [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] DNS should support configurable pod resolv.conf": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] DNS should work with the pod containing more than 6 DNS search paths and longer than 256 search list characters": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] EndpointSlice should create Endpoints and EndpointSlices for Pods matching a Service [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] EndpointSlice should create and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] EndpointSlice should have Endpoints and EndpointSlices pointing to API Server [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] EndpointSlice should support a Service with multiple endpoint IPs specified in multiple EndpointSlices": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] EndpointSlice should support a Service with multiple ports specified in multiple EndpointSlices": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] EndpointSlice should support creating EndpointSlice API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] EndpointSliceMirroring should mirror a custom Endpoint with multiple subsets and same IP address": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] EndpointSliceMirroring should mirror a custom Endpoints resource through create update and delete [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Firewall rule [Slow] [Serial] should create valid firewall rules for LoadBalancer type service": " [Suite:k8s]", + + "[sig-network] Firewall rule control plane should not expose well-known ports": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Ingress API should support creating Ingress API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] IngressClass API should support creating IngressClass API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] IngressClass [Feature:Ingress] should allow IngressClass to have Namespace-scoped parameters [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-network] IngressClass [Feature:Ingress] should choose the one with the later CreationTimestamp, if equal the one with the lower name when two ingressClasses are marked as default [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-network] IngressClass [Feature:Ingress] should not set default value if no default IngressClass [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-network] IngressClass [Feature:Ingress] should set default value on new IngressClass [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + 
"[sig-network] KubeProxy should set TCP CLOSE_WAIT timeout [Privileged]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] LoadBalancers ESIPP [Slow] should handle updates to ExternalTrafficPolicy field": " [Suite:k8s]", + + "[sig-network] LoadBalancers ESIPP [Slow] should only target nodes with endpoints": " [Suite:k8s]", + + "[sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer": " [Suite:k8s]", + + "[sig-network] LoadBalancers ESIPP [Slow] should work for type=NodePort": " [Suite:k8s]", + + "[sig-network] LoadBalancers ESIPP [Slow] should work from pods": " [Suite:k8s]", + + "[sig-network] LoadBalancers should be able to change the type and ports of a TCP service [Slow]": " [Suite:k8s]", + + "[sig-network] LoadBalancers should be able to change the type and ports of a UDP service [Slow]": " [Suite:k8s]", + + "[sig-network] LoadBalancers should be able to create LoadBalancer Service without NodePort and change it [Slow]": " [Suite:k8s]", + + "[sig-network] LoadBalancers should be able to create an internal type load balancer [Slow]": " [Suite:k8s]", + + "[sig-network] LoadBalancers should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on different nodes": " [Disabled:Broken] [Skipped:SingleReplicaTopology] [Suite:k8s]", + + "[sig-network] LoadBalancers should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on the same nodes": " [Disabled:Broken] [Skipped:SingleReplicaTopology] [Suite:k8s]", + + "[sig-network] LoadBalancers should be able to switch session affinity for LoadBalancer service with ESIPP off [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-network] LoadBalancers should be able to switch session affinity for LoadBalancer service with ESIPP on [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-network] LoadBalancers should handle load balancer cleanup finalizer for service [Slow]": " [Suite:k8s]", + + "[sig-network] LoadBalancers should have session affinity work for LoadBalancer service with ESIPP off [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-network] LoadBalancers should have session affinity work for LoadBalancer service with ESIPP on [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-network] LoadBalancers should not have connectivity disruption during rolling update with externalTrafficPolicy=Cluster [Slow]": " [Suite:k8s]", + + "[sig-network] LoadBalancers should not have connectivity disruption during rolling update with externalTrafficPolicy=Local [Slow]": " [Suite:k8s]", + + "[sig-network] LoadBalancers should only allow access from service loadbalancer source ranges [Slow]": " [Suite:k8s]", + + "[sig-network] Loadbalancing: L7 GCE [Slow] [Feature:Ingress] should conform to Ingress spec": " [Suite:k8s]", + + "[sig-network] Loadbalancing: L7 GCE [Slow] [Feature:NEG] rolling update backend pods should not cause service disruption": " [Suite:k8s]", + + "[sig-network] Loadbalancing: L7 GCE [Slow] [Feature:NEG] should be able to create a ClusterIP service": " [Suite:k8s]", + + "[sig-network] Loadbalancing: L7 GCE [Slow] [Feature:NEG] should be able to switch between IG and NEG modes": " [Suite:k8s]", + + "[sig-network] Loadbalancing: L7 GCE [Slow] [Feature:NEG] should conform to Ingress spec": " [Suite:k8s]", + + "[sig-network] Loadbalancing: L7 GCE [Slow] [Feature:NEG] should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise": " [Suite:k8s]", + + "[sig-network] Loadbalancing: L7 GCE [Slow] [Feature:NEG] should sync endpoints for 
both Ingress-referenced NEG and standalone NEG": " [Suite:k8s]", + + "[sig-network] Loadbalancing: L7 GCE [Slow] [Feature:NEG] should sync endpoints to NEG": " [Suite:k8s]", + + "[sig-network] Loadbalancing: L7 Scalability GCE [Slow] [Serial] [Feature:IngressScale] Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses": " [Suite:k8s]", + + "[sig-network] Netpol API should support creating NetworkPolicy API operations": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol API should support creating NetworkPolicy API with endport field": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should allow egress access on one named port [Feature:NetworkPolicy]": " [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from namespace on one named port [Feature:NetworkPolicy]": " [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from updated namespace [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from updated pod [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access on one named port [Feature:NetworkPolicy]": " [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should deny egress from all pods in a namespace [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should deny egress from pods based on PodSelector [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should deny ingress access to updated pod [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should deny ingress from pods on other namespaces [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " 
[Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce ingress policy allowing any port traffic to a server on a specific protocol [Feature:NetworkPolicy] [Feature:UDP]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on Multiple PodSelectors and NamespaceSelectors [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions using default ns label [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector or NamespaceSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector with MatchExpressions [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on Ports [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on any PodSelectors [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow ingress traffic for a target [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow ingress traffic from pods in all namespaces [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic based on NamespaceSelector with MatchLabels using default ns label [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic from pods within server namespace based on PodSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should enforce updated policy [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should not allow access by TCP when a policy specifies only UDP [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should not mistakenly treat 'protocol: SCTP' as 'protocol: TCP', even if the plugin doesn't support SCTP [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should properly isolate pods that are selected by a policy allowing SCTP, even if the plugin doesn't support SCTP [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should stop enforcing policies after they are deleted [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] 
Netpol NetworkPolicy between server and client should support a 'default-deny-all' policy [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should support allow-all policy [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should support denying of egress traffic on the client side (even if the server explicitly allows this traffic) [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol NetworkPolicy between server and client should work with Ingress, Egress specified together [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should enforce policy based on Ports [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]": " [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should enforce policy based on Ports [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + + "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + + "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]": " [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: sctp [LinuxOnly] [Feature:SCTPConnectivity]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: udp [NodeConformance] [Conformance]": " 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: sctp [LinuxOnly] [Feature:SCTPConnectivity]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should be able to handle large requests: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should be able to handle large requests: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should function for client IP based session affinity: http [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should function for client IP based session affinity: udp [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: sctp [Feature:SCTPConnectivity]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should function for multiple endpoint-Services with same selector": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should function for node-Service: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should function for node-Service: sctp [Feature:SCTPConnectivity]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should function for node-Service: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should function for pod-Service: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should function for pod-Service: sctp [Feature:SCTPConnectivity]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should function for pod-Service: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should function for service endpoints using hostNetwork": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should support basic nodePort: udp functionality": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should update endpoints: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should update endpoints: udp": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should update nodePort: http [Slow]": " [Suite:k8s]", + + "[sig-network] Networking Granular Checks: Services should update nodePort: udp [Slow]": " [Suite:k8s]", + + "[sig-network] Networking IPerf2 [Feature:Networking-Performance] should run iperf2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking should allow creating a Pod with an SCTP HostPort [LinuxOnly] [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-network] Networking should check kube-proxy urls": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv4]": " [Skipped:Disconnected] [Skipped:Proxy] [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv6] [Experimental][LinuxOnly]": " [Disabled:Broken] [Skipped:Disconnected] [Skipped:Proxy] [Skipped:azure] [Suite:k8s]", + + "[sig-network] Networking should provide unchanging, static URL paths for kubernetes api services": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] Networking should provider Internet connection for containers using DNS [Feature:Networking-DNS]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] Networking should recreate its iptables rules if they are deleted [Disruptive]": " [Serial] [Suite:k8s]", + + "[sig-network] NoSNAT [Feature:NoSNAT] [Slow] Should be able to send traffic between Pods without SNAT": " [Suite:k8s]", + + "[sig-network] Proxy version v1 A set of valid responses are returned for both pod and service Proxy [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Proxy version v1 A set of valid responses are returned for both pod and service ProxyWithPath [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Proxy version v1 should proxy logs on node using proxy subresource ": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Proxy version v1 should proxy logs on node with explicit kubelet port using proxy subresource ": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Proxy version v1 should proxy through a service and a pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Service endpoints latency should not be very high [Conformance]": " [Serial] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-network] Services GCE [Slow] should be able to create and tear down a standard-tier load balancer [Slow]": " [Suite:k8s]", + + "[sig-network] Services should allow creating a basic SCTP service with pod and endpoints [LinuxOnly] [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-network] Services should allow pods to hairpin back to themselves through services": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should be able to change the type from ClusterIP to ExternalName [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should be able to change the type from ExternalName to ClusterIP [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should be able to change the type from ExternalName to NodePort 
[Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should be able to change the type from NodePort to ExternalName [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is true": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should be able to create a functioning NodePort service [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should be able to up and down services": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] Services should be able to update service type to NodePort listening on same port number but different protocols": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should be possible to connect to a service via ExternalIP when the external IP is not assigned to a node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should be rejected for evicted pods (no endpoints exist)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should be rejected when no endpoints exist": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should be updated after adding or deleting ports ": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should check NodePort out-of-range": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should complete a service status lifecycle [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should create endpoints for unready pods": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should delete a collection of services [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should fail health check node port if there are only terminating endpoints": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should fallback to local terminating endpoints when there are no ready endpoints with externalTrafficPolicy=Local": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should fallback to local terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Local": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should fallback to terminating endpoints when there are no ready endpoints with externallTrafficPolicy=Cluster": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should fallback to terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Cluster": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should find a service from listing all namespaces [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services 
should have session affinity timeout work for NodePort service [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should have session affinity timeout work for service with type clusterIP [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should have session affinity work for NodePort service [LinuxOnly] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should implement service.kubernetes.io/headless": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] Services should implement service.kubernetes.io/service-proxy-name": " [Disabled:Broken] [Suite:k8s]", + + "[sig-network] Services should not be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is false": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should preserve source pod IP for traffic thru service cluster IP [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should prevent NodePort collisions": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should provide secure master service [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should release NodePorts on delete": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should respect internalTrafficPolicy=Local Pod (hostNetwork: true) to Pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should respect internalTrafficPolicy=Local Pod and Node, to Pod (hostNetwork: true)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should respect internalTrafficPolicy=Local Pod to Pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should serve a basic endpoint from pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should serve endpoints on same port and different protocol for internal traffic on Type LoadBalancer ": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] Services should serve endpoints on same port and different protocols [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should serve multiport endpoints from pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should test the lifecycle of an Endpoint [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-network] Services should work after restarting apiserver [Disruptive]": " [Serial] [Suite:k8s]", + + "[sig-network] Services should work after restarting kube-proxy [Disruptive]": " [Serial] [Suite:k8s]", + + "[sig-network] Services should work after the service has been recreated": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should be able to handle large requests: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] 
should be able to handle large requests: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for client IP based session affinity: http [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for client IP based session affinity: udp [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for endpoint-Service: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for endpoint-Service: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for node-Service: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for node-Service: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: sctp [Feature:SCTPConnectivity]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for service endpoints using hostNetwork": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should update endpoints: http": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should update endpoints: udp": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should be able to reach pod on ipv4 and ipv6 ip": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should create a single stack service with cluster ip from primary service range": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should create pod, add ipv6 and ipv4 ip to host ips [Feature:PodHostIPs]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should create pod, add ipv6 and ipv4 ip to pod ips": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should create service with ipv4 cluster ip": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should create service with ipv4,v6 cluster ip": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should create 
service with ipv6 cluster ip": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should create service with ipv6,v4 cluster ip": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:IPv6DualStack] should have ipv4 and ipv6 internal node ip": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-network] [Feature:PerformanceDNS] [Serial] Should answer DNS query for maximum number of services per cluster": " [Slow] [Suite:k8s]", + + "[sig-network] [Feature:ServiceCIDRs] should create Services and servce on different Service CIDRs": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-network] [Feature:Topology Hints] should distribute endpoints evenly": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-network] kube-proxy migration [Feature:KubeProxyDaemonSetMigration] Downgrade kube-proxy from a DaemonSet to static pods should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade]": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-network] kube-proxy migration [Feature:KubeProxyDaemonSetMigration] Upgrade kube-proxy from static pods to a DaemonSet should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade]": " [Disabled:Unimplemented] [Suite:k8s]", + + "[sig-node] AppArmor load AppArmor profiles can disable an AppArmor profile, using unconfined": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] AppArmor load AppArmor profiles should enforce an AppArmor profile": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] ConfigMap should be consumable via environment variable [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] ConfigMap should be consumable via the environment [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] ConfigMap should fail to create ConfigMap with empty key [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] ConfigMap should run through a ConfigMap lifecycle [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] ConfigMap should update ConfigMap successfully": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart exec hook properly [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart http hook properly [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart https hook properly [MinimumKubeletVersion:1.23] [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop exec hook properly [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop http hook properly [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop https hook properly 
[MinimumKubeletVersion:1.23] [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test on terminated container should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test on terminated container should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test on terminated container should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test on terminated container should report termination message if TerminationMessagePath is set [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test on terminated container should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test when running a container with a new image should be able to pull from private registry with secret [NodeConformance]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test when running a container with a new image should be able to pull image [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test when running a container with a new image should not be able to pull from private registry without secret [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test when running a container with a new image should not be able to pull image from invalid registry [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Container Runtime blackbox test when starting a container that exits should run with the expected status [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Containers should be able to override the image's default arguments (container cmd) [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Containers should be able to override the image's default command (container entrypoint) [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Containers should be able to override the image's default command and arguments [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Containers should use the image defaults if command and args are blank [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster truncates the name of a generated resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and 
not setting ReservedFor deletes generated claims when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and not setting ReservedFor does not delete generated claims when pod is restarting": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and not setting ReservedFor must deallocate after use when using delayed allocation": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and not setting ReservedFor removes reservation from claim when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and not setting ReservedFor supports external claim referenced by multiple containers of multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and not setting ReservedFor supports external claim referenced by multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and not setting ReservedFor supports init containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and not setting ReservedFor supports inline claim referenced by multiple containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and not setting ReservedFor supports scheduled pod referencing external resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and not setting ReservedFor supports scheduled pod referencing inline resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and not setting ReservedFor supports simple pod referencing external resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and not setting ReservedFor supports simple pod referencing inline resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and setting ReservedFor deletes generated claims when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and setting ReservedFor does not delete generated claims when pod is restarting": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and setting ReservedFor must deallocate after use when using delayed allocation": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and setting ReservedFor removes reservation from claim when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + 
"[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and setting ReservedFor supports external claim referenced by multiple containers of multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and setting ReservedFor supports external claim referenced by multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and setting ReservedFor supports init containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and setting ReservedFor supports inline claim referenced by multiple containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and setting ReservedFor supports scheduled pod referencing external resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and setting ReservedFor supports scheduled pod referencing inline resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and setting ReservedFor supports simple pod referencing external resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with delayed allocation and setting ReservedFor supports simple pod referencing inline resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with immediate allocation deletes generated claims when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with immediate allocation does not delete generated claims when pod is restarting": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with immediate allocation must deallocate after use when using delayed allocation": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with immediate allocation removes reservation from claim when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with immediate allocation supports external claim referenced by multiple containers of multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with immediate allocation supports external claim referenced by multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with immediate allocation supports init containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with immediate allocation supports inline claim referenced by multiple containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with immediate 
allocation supports scheduled pod referencing external resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with immediate allocation supports scheduled pod referencing inline resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with immediate allocation supports simple pod referencing external resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with immediate allocation supports simple pod referencing inline resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with local unshared resources reuses an allocated immediate claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with shared network resources shares an allocated immediate claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with DRA driver controller with structured parameters must manage ResourceSlices [Slow]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet with ConfigMap parameters must not run a pod if a claim is not reserved for it": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet with ConfigMap parameters must retry NodePrepareResources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet with ConfigMap parameters must skip NodePrepareResource if not used by any container": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet with ConfigMap parameters must unprepare resources for force-deleted pod": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet with ConfigMap parameters registers plugin": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] multiple drivers using both drapbv1alpha2 and drapbv1alpha3 work": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] multiple drivers using only drapbv1alpha2 work": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] multiple drivers using only drapbv1alpha3 work": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on multiple nodes reallocation works": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on multiple nodes with network-attached resources [Serial] [Disruptive] [Slow] must deallocate on non graceful node shutdown": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on multiple nodes with network-attached resources schedules onto different nodes": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on multiple nodes with node-local resources with delayed allocation uses all resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on multiple nodes with node-local resources with immediate allocation uses all resources": " [Disabled:Alpha] 
[Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node retries pod scheduling after creating resource class": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node retries pod scheduling after updating resource class": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node runs a pod without a generated resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node supports claim and class parameters": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node supports reusing resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node supports sharing a claim concurrently": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node supports sharing a claim sequentially [Slow]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with delayed allocation deletes generated claims when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with delayed allocation does not delete generated claims when pod is restarting": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with delayed allocation must deallocate after use when using delayed allocation": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with delayed allocation removes reservation from claim when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with delayed allocation supports external claim referenced by multiple containers of multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with delayed allocation supports external claim referenced by multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with delayed allocation supports init containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with delayed allocation supports inline claim referenced by multiple containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with delayed allocation supports simple pod referencing external resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with delayed allocation supports simple pod referencing inline resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with immediate allocation deletes generated claims when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA 
[Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with immediate allocation does not delete generated claims when pod is restarting": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with immediate allocation must deallocate after use when using delayed allocation": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with immediate allocation removes reservation from claim when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with immediate allocation supports external claim referenced by multiple containers of multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with immediate allocation supports external claim referenced by multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with immediate allocation supports init containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with immediate allocation supports inline claim referenced by multiple containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with immediate allocation supports simple pod referencing external resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with ConfigMap parameters on single node with immediate allocation supports simple pod referencing inline resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on multiple nodes with node-local resources with delayed allocation uses all resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on multiple nodes with node-local resources with immediate allocation uses all resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node retries pod scheduling after creating resource class": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node retries pod scheduling after updating resource class": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node runs a pod without a generated resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports claim and class parameters": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports reusing resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports sharing a claim concurrently": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports sharing a claim sequentially [Slow]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA 
[Feature:DynamicResourceAllocation] with structured parameters on single node with delayed allocation deletes generated claims when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with delayed allocation does not delete generated claims when pod is restarting": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with delayed allocation must deallocate after use when using delayed allocation": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with delayed allocation removes reservation from claim when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with delayed allocation supports external claim referenced by multiple containers of multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with delayed allocation supports external claim referenced by multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with delayed allocation supports init containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with delayed allocation supports inline claim referenced by multiple containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with delayed allocation supports simple pod referencing external resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with delayed allocation supports simple pod referencing inline resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with immediate allocation deletes generated claims when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with immediate allocation does not delete generated claims when pod is restarting": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with immediate allocation must deallocate after use when using delayed allocation": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with immediate allocation removes reservation from claim when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with immediate allocation supports external claim referenced by multiple containers of multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with immediate allocation supports external claim referenced by multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with immediate allocation supports init containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] 
DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with immediate allocation supports inline claim referenced by multiple containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with immediate allocation supports simple pod referencing external resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node with immediate allocation supports simple pod referencing inline resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on multiple nodes with node-local resources with delayed allocation uses all resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on multiple nodes with node-local resources with immediate allocation uses all resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node retries pod scheduling after creating resource class": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node retries pod scheduling after updating resource class": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node runs a pod without a generated resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node supports claim and class parameters": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node supports reusing resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node supports sharing a claim concurrently": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node supports sharing a claim sequentially [Slow]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with delayed allocation deletes generated claims when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with delayed allocation does not delete generated claims when pod is restarting": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with delayed allocation must deallocate after use when using delayed allocation": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with delayed allocation removes reservation from claim when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with delayed allocation supports external claim referenced by multiple containers of multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with delayed allocation supports external claim referenced by multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] 
DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with delayed allocation supports init containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with delayed allocation supports inline claim referenced by multiple containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with delayed allocation supports simple pod referencing external resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with delayed allocation supports simple pod referencing inline resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with immediate allocation deletes generated claims when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with immediate allocation does not delete generated claims when pod is restarting": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with immediate allocation must deallocate after use when using delayed allocation": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with immediate allocation removes reservation from claim when pod is done": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with immediate allocation supports external claim referenced by multiple containers of multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with immediate allocation supports external claim referenced by multiple pods": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with immediate allocation supports init containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with immediate allocation supports inline claim referenced by multiple containers": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with immediate allocation supports simple pod referencing external resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] DRA [Feature:DynamicResourceAllocation] with translated parameters on single node with immediate allocation supports simple pod referencing inline resource claim": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPIHugePages] Downward API tests for hugepages should provide container's limits.hugepages- and requests.hugepages- as env vars": " [Suite:k8s]", + + "[sig-node] Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPIHugePages] Downward API tests for hugepages should provide default limits.hugepages- from node allocatable": " [Suite:k8s]", + + "[sig-node] Downward API should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] 
Downward API should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Downward API should provide host IP and pod IP as an env var if pod uses host network [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Downward API should provide host IP as an env var [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Downward API should provide pod UID as env vars [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Downward API should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Ephemeral Containers [NodeConformance] should update the ephemeral containers in an existing pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Ephemeral Containers [NodeConformance] will start an ephemeral container in an existing pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Events should be sent by kubelets and the scheduler about pods scheduling and running ": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] ImageCredentialProvider [Feature:KubeletCredentialProviders] should be able to create pod with image credentials fetched from external credential provider ": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-node] InitContainer [NodeConformance] should invoke init containers on a RestartAlways pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] InitContainer [NodeConformance] should invoke init containers on a RestartNever pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] InitContainer [NodeConformance] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] InitContainer [NodeConformance] should not start app containers if init containers fail on a RestartAlways pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Kubelet [Serial] [Slow] experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking] resource tracking for 100 pods per node": " [Suite:k8s]", + + "[sig-node] Kubelet [Serial] [Slow] regular resource usage tracking [Feature:RegularResourceUsageTracking] resource tracking for 0 pods per node": " [Suite:k8s]", + + "[sig-node] Kubelet [Serial] [Slow] regular resource usage tracking [Feature:RegularResourceUsageTracking] resource tracking for 100 pods per node": " [Suite:k8s]", + + "[sig-node] Kubelet when scheduling a busybox command in a pod should print the output to logs [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Kubelet when scheduling a busybox command that always fails in a pod should be possible to delete [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Kubelet when scheduling a busybox command that always fails in a pod should have an terminated reason [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] 
Kubelet when scheduling a read only busybox container should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Kubelet when scheduling an agnhost Pod with hostAliases should write entries to /etc/hosts [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] KubeletManagedEtcHosts should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Lease lease API should be available [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Mount propagation should propagate mounts within defined scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] NoExecuteTaintManager Multiple Pods [Serial] evicts pods with minTolerationSeconds [Disruptive] [Conformance]": " [Skipped:SingleReplicaTopology] [Suite:k8s]", + + "[sig-node] NoExecuteTaintManager Multiple Pods [Serial] only evicts pods without tolerations from tainted nodes": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-node] NoExecuteTaintManager Single Pod [Serial] doesn't evict pod with tolerations from tainted nodes": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-node] NoExecuteTaintManager Single Pod [Serial] eventually evict pod with finite tolerations from tainted nodes": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-node] NoExecuteTaintManager Single Pod [Serial] evicts pods from tainted nodes": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-node] NoExecuteTaintManager Single Pod [Serial] pods evicted from tainted nodes have pod disruption condition": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-node] NoExecuteTaintManager Single Pod [Serial] removing taint cancels eviction [Disruptive] [Conformance]": " [Skipped:SingleReplicaTopology] [Suite:k8s]", + + "[sig-node] NodeLease NodeLease should have OwnerReferences set": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] NodeLease NodeLease the kubelet should create and update a lease in the kube-node-lease namespace": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] NodeLease NodeLease the kubelet should report node status infrequently": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] NodeProblemDetector should run without error": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] BestEffort pod - try requesting memory, expect error": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container - decrease CPU (RestartContainer) & memory (NotRequired)": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU limits only": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests and increase CPU limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod 
InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests and increase memory limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests and limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests only": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory limits only": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests and increase CPU limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests and increase memory limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests and limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests only": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU limits only": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests and decrease CPU limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests and decrease memory limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests and limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests only": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase memory limits only": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests and decrease CPU limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests and decrease memory limits": " 
[Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests and limits": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests only": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu & memory requests - decrease memory request": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, three containers - decrease c1 resources, increase c2 resources, no change for c3 (net increase for pod)": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, three containers - increase c1 resources, no change for c2, decrease c3 resources (no net change for pod)": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, three containers - no change for c1, increase c2 resources, decrease c3 (net decrease for pod)": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - decrease CPU & increase memory": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - decrease CPU & memory": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - increase CPU & decrease memory": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - increase CPU & memory": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - increase CPU (NotRequired) & memory (RestartContainer)": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, three containers (c1, c2, c3) - increase: CPU (c1,c3), memory (c2) ; decrease: CPU (c2), memory (c1,c3)": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] pod-resize-resource-quota-test": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Pod garbage collector [Feature:PodGarbageCollector] [Slow] should handle the creation of 1000 pods": " [Suite:k8s]", + + "[sig-node] PodOSRejection [NodeConformance] Kubelet should reject pod when the node OS doesn't match pod's OS": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] PodTemplates should delete a collection of pod templates [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] PodTemplates should replace a pod template [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] PodTemplates should run the lifecycle of PodTemplates [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods Extended Delete Grace Period should be submitted and removed": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Pods Extended Pod Container Status should never report container start when an init container fails": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Pods Extended Pod Container Status should never report success for a pending container": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Pods Extended Pod Container lifecycle evicted pods should be terminal": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Pods Extended Pod Container lifecycle should not create extra sandbox if all containers are done": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Pods Extended Pod TerminationGracePeriodSeconds is negative pod with negative grace period": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Pods Extended Pods Set QOS Class should be set on Pods with matching resource requests and limits for memory and cpu [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should be submitted and removed [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should be updated [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should cap back-off at MaxContainerBackOff [Slow] [NodeConformance]": " [Suite:k8s]", + + "[sig-node] Pods should contain environment variables for services [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should delete a collection of pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should get a host IP [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should have their auto-restart back-off timer reset on image update [Slow] [NodeConformance]": " [Suite:k8s]", + + "[sig-node] Pods should patch a pod status [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should run through the lifecycle of Pods and PodStatus [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should support pod readiness gates [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Pods should support remote command execution over websockets [NodeConformance] [Conformance]": " [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Pods should support retrieving logs from the container over websockets [NodeConformance] [Conformance]": " [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] PreStop graceful pod terminated should wait until preStop hook completes the process": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] PreStop should call prestop when killing a pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] PrivilegedPod [NodeConformance] should enable privileged commands [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should *not* be restarted by liveness probe because 
startup probe delays it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container should *not* be restarted with a GRPC liveness probe [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container should *not* be restarted with a non-local redirect http liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container should be ready immediately after startupProbe succeeds": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should be restarted by liveness probe after startup probe enables it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should be restarted startup probe fails": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container should be restarted with a GRPC liveness probe [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container should be restarted with a failing exec liveness probe that took longer than the timeout": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should be restarted with a local redirect http liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20] [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container should mark readiness on pods to false and disable liveness probes while pod is in progress of terminating": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20] [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should override timeoutGracePeriodSeconds when LivenessProbe field is set [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container should 
override timeoutGracePeriodSeconds when StartupProbe field is set [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Probing container with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Probing container with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] RuntimeClass should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] RuntimeClass should reject a Pod requesting a RuntimeClass with conflicting node selector": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] RuntimeClass should reject a Pod requesting a deleted RuntimeClass [NodeConformance] [Conformance]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] RuntimeClass should reject a Pod requesting a non-existent RuntimeClass [NodeConformance] [Conformance]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with scheduling with taints [Serial]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with scheduling without taints ": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] RuntimeClass should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] RuntimeClass should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] RuntimeClass should support RuntimeClasses API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] SSH should SSH to all nodes and run commands": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] Secrets should be consumable from pods in env vars [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Secrets should be consumable via the environment [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Secrets should fail to create secret due to empty secret key [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Secrets should patch a secret [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Security Context When creating a container with runAsNonRoot should not run with an explicit root user ID [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context When creating a container with runAsNonRoot should not run without a specified user ID": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context When creating a container with runAsNonRoot should run with an explicit non-root user ID [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context When creating a container with runAsNonRoot should run with an image specified user ID": 
" [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context When creating a container with runAsUser should run the container with uid 0 [LinuxOnly] [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context When creating a container with runAsUser should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Security Context When creating a pod with HostUsers must create the user namespace if set to false [LinuxOnly] [Feature:UserNamespacesSupport]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Security Context When creating a pod with HostUsers must not create the user namespace if set to true [LinuxOnly] [Feature:UserNamespacesSupport]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Security Context When creating a pod with HostUsers should mount all volumes with proper permissions with hostUsers=false [LinuxOnly] [Feature:UserNamespacesSupport]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Security Context When creating a pod with HostUsers should set FSGroup to user inside the container with hostUsers=false [LinuxOnly] [Feature:UserNamespacesSupport]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] Security Context When creating a pod with privileged should run the container as privileged when true [LinuxOnly] [NodeFeature:HostAccess]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context When creating a pod with privileged should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Security Context When creating a pod with readOnlyRootFilesystem should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context When creating a pod with readOnlyRootFilesystem should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Security Context should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Security Context should support container.SecurityContext.RunAsUser [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Security Context should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context should support seccomp default which is unconfined [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context should support seccomp runtime/default [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context should support seccomp unconfined on the container [LinuxOnly]": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context should support seccomp unconfined on the pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context should support volume SELinux relabeling [Flaky] [LinuxOnly]": " [Suite:k8s]", + + "[sig-node] Security Context should support volume SELinux relabeling when using hostIPC [Flaky] [LinuxOnly]": " [Suite:k8s]", + + "[sig-node] Security Context should support volume SELinux relabeling when using hostPID [Flaky] [LinuxOnly]": " [Suite:k8s]", + + "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when true [LinuxOnly] [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Security Context when if the container's primary UID belongs to some groups in the image [LinuxOnly] should add pod.Spec.SecurityContext.SupplementalGroups to them [LinuxOnly] in resultant supplementary groups for the container processes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should not launch unsafe, but not explicitly enabled sysctls on the node [MinimumKubeletVersion:1.21]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should support sysctls [MinimumKubeletVersion:1.21] [Environment:NotInUserNS] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should support sysctls with slashes as separator [MinimumKubeletVersion:1.23] [Environment:NotInUserNS]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] Variable Expansion should allow composing env vars into new env vars [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Variable Expansion should allow substituting values in a container's args [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Variable Expansion should allow substituting values in a container's command [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Variable Expansion should allow substituting values in a volume subpath [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-node] Variable Expansion should fail substituting values in a volume subpath with absolute path [Slow] [Conformance]": " [Suite:k8s]", + + "[sig-node] Variable Expansion should fail substituting values in a volume subpath with backticks [Slow] [Conformance]": " [Suite:k8s]", + + "[sig-node] Variable Expansion should succeed in writing subpaths in container [Slow] [Conformance]": " 
[Suite:k8s]", + + "[sig-node] Variable Expansion should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance]": " [Suite:k8s]", + + "[sig-node] [Feature:Example] Downward API should create a pod that prints his name and namespace": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [Feature:Example] Liveness liveness pods should be automatically restarted": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [Feature:Example] Secret should create a pod that reads a secret": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action reduce GracePeriodSeconds during runtime": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action valid prestop hook using sleep action": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted by liveness probe because startup probe delays it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a /healthz http liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a GRPC liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a exec \"cat /tmp/health\" liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a non-local redirect http liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a tcp:8080 liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be ready immediately after startupProbe succeeds": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted by liveness probe after startup probe enables it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted startup probe fails": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a /healthz http liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a GRPC liveness probe": " [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a exec \"cat /tmp/health\" liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a failing exec liveness probe that took longer than the timeout": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a local redirect http liveness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should have monotonically increasing restart count": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should mark readiness on pods to false and disable liveness probes while pod is in progress of terminating": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should override timeoutGracePeriodSeconds when LivenessProbe field is set": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should override timeoutGracePeriodSeconds when StartupProbe field is set": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container with readiness probe should not be ready before initial delay and never restart": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container with readiness probe that fails should never be ready and never restart": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart exec hook properly": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart http hook properly": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart https hook properly [MinimumKubeletVersion:1.23]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop exec hook properly": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop http hook properly": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [NodeAlphaFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop https hook properly [MinimumKubeletVersion:1.23]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-node] [Serial] Pod InPlace Resize Container (scheduler-focused) [Feature:InPlacePodVerticalScaling] pod-resize-scheduler-tests": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] crictl should be able to run crictl on the node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-node] gpu Upgrade [Feature:GPUUpgrade] cluster downgrade should be able to run gpu pod after downgrade [Feature:GPUClusterDowngrade]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] gpu Upgrade [Feature:GPUUpgrade] cluster upgrade should be able to run gpu pod after upgrade [Feature:GPUClusterUpgrade]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] gpu Upgrade [Feature:GPUUpgrade] master upgrade should NOT disrupt gpu pod [Feature:GPUMasterUpgrade]": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-node] kubelet Clean up pods on node kubelet should be able to delete 10 pods per node in 1m0s.": " [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-node] kubelet host cleanup with volume mounts [HostCleanup][Flaky] Host cleanup after disrupting NFS volume [NFS] after stopping the nfs-server and deleting the (active) client pod, the NFS mount and the pod's UID directory should be removed.": " [Suite:k8s]", + + "[sig-node] kubelet host cleanup with volume mounts [HostCleanup][Flaky] Host cleanup after disrupting NFS volume [NFS] after stopping the nfs-server and deleting the (sleeping) client pod, the NFS mount and the pod's UID directory should be removed.": " [Suite:k8s]", + + "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes//proxy/logs/?query=/ [Feature:NodeLogQuery] [LinuxOnly] should return the error with an empty --query option": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes//proxy/logs/?query=/ [Feature:NodeLogQuery] [LinuxOnly] should return the kubelet logs ": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes//proxy/logs/?query=/ [Feature:NodeLogQuery] [LinuxOnly] should return the kubelet logs for the current boot with the pattern container": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes//proxy/logs/?query=/ [Feature:NodeLogQuery] [LinuxOnly] should return the kubelet logs for the current boot": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] kubelet kubectl get --raw 
\"/api/v1/nodes//proxy/logs/?query=/ [Feature:NodeLogQuery] [LinuxOnly] should return the kubelet logs since the current date and time": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes//proxy/logs/?query=/ [Feature:NodeLogQuery] [LinuxOnly] should return the last three lines of the kubelet logs": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-scheduling] GPUDevicePluginAcrossRecreate [Feature:Recreate] run Nvidia GPU Device Plugin tests with a recreation": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-scheduling] LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-scheduling] LimitRange should list, patch and delete a LimitRange by collection [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-scheduling] Multi-AZ Clusters should spread the pods of a replication controller across zones [Serial]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-scheduling] Multi-AZ Clusters should spread the pods of a service across zones [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] PodTopologySpread Filtering validates 4 pods with MaxSkew=1 are evenly distributed into 2 nodes": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates Pods with non-empty schedulingGates are blocked on scheduling": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation]": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates pod overhead is considered along with resource limits of pods that are allowed to run verify pod overhead is accounted for": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates resource limits of pods that are allowed to run [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeAffinity is respected if not matching": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if matching [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if not matching [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates that required NodeAffinity setting is respected if matching": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if matching": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if not matching": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance]": " [Slow] [Suite:k8s]", + + "[sig-scheduling] SchedulerPredicates [Serial] validates that 
there is no conflict between pods with same hostPort but different hostIP and protocol": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPreemption [Serial] PodTopologySpread Preemption validates proper pods are preempted": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath runs ReplicaSets to verify preemption running path [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints verify PriorityClass endpoints can be operated with different HTTP methods [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-scheduling] SchedulerPreemption [Serial] validates basic preemption works [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-scheduling] SchedulerPreemption [Serial] validates lower priority pod preemption by critical pod [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-scheduling] SchedulerPreemption [Serial] validates pod disruption condition is added to the preempted pod": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPriorities [Serial] Pod should be preferably scheduled to nodes pod can tolerate": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPriorities [Serial] Pod should be scheduled to node that don't match the PodAntiAffinity terms": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] SchedulerPriorities [Serial] PodTopologySpread Scoring validates pod should be preferably scheduled to node which makes the matching pods more evenly distributed": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-scheduling] [Feature:GPUDevicePlugin] run Nvidia GPU Device Plugin tests": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-storage] CSI Mock fsgroup as mount option Delegate FSGroup to CSI driver [LinuxOnly] should not pass FSGroup to CSI driver if it is set in pod and driver supports VOLUME_MOUNT_GROUP": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock fsgroup as mount option Delegate FSGroup to CSI driver [LinuxOnly] should pass FSGroup to CSI driver if it is set in pod and driver supports VOLUME_MOUNT_GROUP": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should add SELinux mount option to existing mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not pass SELinux mount option for CSI driver that does not support SELinux mount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not pass SELinux mount option for Pod without SELinux context": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not pass SELinux mount option for RWO volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not unstage volume when starting a second pod with the same SELinux context": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should pass SELinux mount option for RWOP volume and Pod with SELinux context set": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should unstage volume when starting a second pod with different SELinux context": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Feature:SELinuxMountReadWriteOncePod] [Serial] error is bumped on two Pods with a different context on RWOP volume": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Feature:SELinuxMountReadWriteOncePod] [Serial] warning is bumped on two Pods with a different context on RWO volume": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Feature:SELinuxMountReadWriteOncePod] [Serial] warning is not bumped on two Pods with the same context on RWO volume": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Mock volume attach CSI CSIDriver deployment after pod creation using non-attachable mock driver should bringup pod after deploying CSIDriver attach=false [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should not require VolumeAttach for drivers without attachment": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should preserve attachment policy when no CSIDriver present": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should require VolumeAttach for drivers with attachment": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should require VolumeAttach for ephemermal volume and drivers with attachment": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion CSI Volume expansion should expand volume by restarting pod if attach=off, nodeExpansion=on": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion CSI Volume expansion should expand volume by restarting pod if attach=on, nodeExpansion=on": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion CSI Volume expansion should expand volume without restarting pod if nodeExpansion=off": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion CSI Volume expansion should not expand volume if resizingOnDriver=off, resizingOnSC=on": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion CSI Volume expansion should not have staging_path missing in node expand volume pod if attach=on, nodeExpansion=on": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion CSI online volume expansion should expand volume without restarting pod if attach=off, nodeExpansion=on": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion CSI online volume expansion should expand volume 
without restarting pod if attach=on, nodeExpansion=on": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion CSI online volume expansion with secret should expand volume without restarting pod if attach=on, nodeExpansion=on, csiNodeExpandSecret=on": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] recovery should not be possible in partially expanded volumes": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should allow recovery if controller expansion fails with final error": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should record target size in allocated resources": " [Disabled:Alpha] [Suite:k8s]", + + "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy [LinuxOnly] should modify fsGroup if fsGroupPolicy=File": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy [LinuxOnly] should modify fsGroup if fsGroupPolicy=default": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy [LinuxOnly] should not modify fsGroup if fsGroupPolicy=None": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume limit CSI volume limit information using mock driver should report attach limit for generic ephemeral volume when persistent volume is attached [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume limit CSI volume limit information using mock driver should report attach limit for persistent volume when generic ephemeral volume is attached [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume limit CSI volume limit information using mock driver should report attach limit when limit is bigger than 0 [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should call NodeUnstage after NodeStage ephemeral error": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should call NodeUnstage after NodeStage success": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should not call NodeUnstage after NodeStage final error": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should retry NodeStage after NodeStage ephemeral error": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should retry NodeStage after NodeStage final error": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume node stage CSI NodeUnstage error cases [Slow] should call NodeStage after NodeUnstage success": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume node stage CSI NodeUnstage error cases [Slow] two pods: should call NodeStage after previous NodeUnstage final error": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume node stage CSI NodeUnstage error cases [Slow] two pods: should call NodeStage after previous NodeUnstage transient error": " [Suite:k8s]", + + "[sig-storage] CSI Mock volume service account token CSIServiceAccountToken token should be plumbed down when csiServiceAccountTokenEnabled=true": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] CSI Mock volume service account token CSIServiceAccountToken token should not be plumbed down when CSIDriver is not deployed": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume service account token CSIServiceAccountToken token should not be plumbed down when csiServiceAccountTokenEnabled=false": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume snapshot CSI Snapshot Controller metrics [Feature:VolumeSnapshotDataSource] snapshot controller should emit dynamic CreateSnapshot, CreateSnapshotAndReady, and DeleteSnapshot metrics": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume snapshot CSI Snapshot Controller metrics [Feature:VolumeSnapshotDataSource] snapshot controller should emit pre-provisioned CreateSnapshot, CreateSnapshotAndReady, and DeleteSnapshot metrics": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume snapshot CSI Volume Snapshots [Feature:VolumeSnapshotDataSource] volumesnapshotcontent and pvc in Bound state with deletion timestamp set should not get deleted while snapshot finalizer exists": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume snapshot CSI Volume Snapshots secrets [Feature:VolumeSnapshotDataSource] volume snapshot create/delete with secrets": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity disabled": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity unused": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity used, have capacity": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity used, insufficient capacity": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity used, no capacity": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity storage capacity exhausted, immediate binding": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity storage capacity exhausted, late binding, no topology": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity storage capacity exhausted, late binding, with topology": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock volume storage capacity storage capacity unlimited": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock workload info CSI workload information using mock driver contain ephemeral=true when using inline volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock workload info CSI workload information using mock driver should be passed when podInfoOnMount=true": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock workload info CSI workload information using mock driver should not be passed when CSIDriver does not exist": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock workload info CSI workload information using mock driver should not be passed when podInfoOnMount=false": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Mock workload info CSI workload information using mock driver should not be passed when podInfoOnMount=nil": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI 
Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " 
[Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, 
volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should block a second pod from using an in-use ReadWriteOncePod volume on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should preempt lower priority pods using ReadWriteOncePod volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] 
CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] 
[Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] volumes 
should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] 
should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volume-lifecycle-performance should provision volumes at scale within performance constraints [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI 
Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: 
csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should 
unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: 
Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: 
Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI 
Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] 
Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath 
directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV 
(default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two 
volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after 
modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read/write inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support two pods which have the same volume definition": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] 
multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] 
provisioning should provision storage with pvc data source in parallel [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Skipped:gce] 
[Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " 
[Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should block a second pod from using an in-use ReadWriteOncePod volume on the same node": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should preempt lower priority pods using ReadWriteOncePod volumes": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Skipped:gce] 
[Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should allow 
exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] 
disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods 
on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volume-lifecycle-performance should provision volumes at scale within performance constraints [Slow] [Serial]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data 
across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data 
source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic 
PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI 
Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + 
"[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] 
[Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: 
pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] 
multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent 
subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] 
[Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] 
Should test that pv written before kubelet restart is readable after restart.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV 
(ntfs)] [Feature:Windows] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral)": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] CSIInlineVolumes should run through the lifecycle of a CSIDriver [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] CSIInlineVolumes should support CSIVolumeSource in Pod API [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] CSIStorageCapacity should support CSIStorageCapacities API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap Should fail non-optional pod creation due to configMap object does not exist [Slow]": " [Suite:k8s]", + + "[sig-storage] ConfigMap Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]": " [Suite:k8s]", + + "[sig-storage] ConfigMap binary data should be reflected in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap optional updates should be reflected in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance]": " 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] ConfigMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap should be immutable if `immutable` field is set [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] ConfigMap updates should be reflected in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API [Serial] [Disruptive] [Feature:EphemeralStorage] Downward API tests for local ephemeral storage should provide container's limits.ephemeral-storage and requests.ephemeral-storage as env vars": " [Suite:k8s]", + + "[sig-storage] Downward API [Serial] [Disruptive] [Feature:EphemeralStorage] Downward API tests for local ephemeral storage should provide default limits.ephemeral-storage from node allocatable": " [Suite:k8s]", + + "[sig-storage] Downward API volume should provide container's cpu limit [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should provide container's cpu request [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should provide container's memory limit [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should provide container's memory request [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + 
"[sig-storage] Downward API volume should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Downward API volume should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Downward API volume should provide podname only [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should update annotations on modification [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Downward API volume should update labels on modification [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning DynamicProvisioner Default should be disabled by changing the default annotation [Serial] [Disruptive]": " [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning DynamicProvisioner Default should be disabled by removing the default annotation [Serial] [Disruptive]": " [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning DynamicProvisioner Default should create and delete default persistent volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning DynamicProvisioner External should let an external dynamic provisioner create and delete persistent volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] deletion should be idempotent": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] should provision storage with different parameters": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] should provision storage with non-default reclaim policy Retain": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] should test that deleting a claim before the volume is provisioned deletes the volume.": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] Dynamic Provisioning Invalid AWS KMS key should report an error and create no PV": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes pod should support memory backed volumes of specified size": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes pod should support shared volumes between containers [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should 
support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] files with FSGroup ownership should support (root,0644,tmpfs)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is non-root": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is root": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] nonexistent volume subPath should have the correct mode and owner using FSGroup": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] volume on default medium should have the correct mode using FSGroup": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] volume on tmpfs should have the correct mode using FSGroup": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] EmptyDir wrapper volumes should not cause 
race condition when used for configmaps [Serial] [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + + "[sig-storage] EmptyDir wrapper volumes should not cause race condition when used for git_repo [Serial] [Slow]": " [Suite:k8s]", + + "[sig-storage] EmptyDir wrapper volumes should not conflict [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : configmap": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : projected": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : secret": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Flexvolumes should be mountable when attachable [Feature:Flexvolumes]": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Flexvolumes should be mountable when non-attachable": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] GenericPersistentVolume [Disruptive] When kubelet restarts Should test that a file written to the mount before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] GenericPersistentVolume [Disruptive] When kubelet restarts Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] GenericPersistentVolume [Disruptive] When kubelet restarts Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] HostPath should give a volume the correct mode [LinuxOnly] [NodeConformance]": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] HostPath should support r/w [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] HostPath should support subPath [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] HostPathType Block Device [Slow] Should be able to mount block device 'ablkdev' successfully when HostPathType is HostPathBlockDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Block Device [Slow] Should be able to mount block device 'ablkdev' successfully when HostPathType is HostPathUnset": " [Suite:k8s]", + + "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathCharDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathDirectory": " [Suite:k8s]", + + "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathFile": " [Suite:k8s]", + + "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathSocket": " [Suite:k8s]", + + "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting non-existent block device 'does-not-exist-blk-dev' when HostPathType is HostPathBlockDev": " [Suite:k8s]", + + "[sig-storage] HostPathType 
Character Device [Slow] Should be able to mount character device 'achardev' successfully when HostPathType is HostPathCharDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Character Device [Slow] Should be able to mount character device 'achardev' successfully when HostPathType is HostPathUnset": " [Suite:k8s]", + + "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathBlockDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathDirectory": " [Suite:k8s]", + + "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathFile": " [Suite:k8s]", + + "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathSocket": " [Suite:k8s]", + + "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting non-existent character device 'does-not-exist-char-dev' when HostPathType is HostPathCharDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Directory [Slow] Should be able to mount directory 'adir' successfully when HostPathType is HostPathDirectory": " [Suite:k8s]", + + "[sig-storage] HostPathType Directory [Slow] Should be able to mount directory 'adir' successfully when HostPathType is HostPathUnset": " [Suite:k8s]", + + "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathBlockDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathCharDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathFile": " [Suite:k8s]", + + "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathSocket": " [Suite:k8s]", + + "[sig-storage] HostPathType Directory [Slow] Should fail on mounting non-existent directory 'does-not-exist-dir' when HostPathType is HostPathDirectory": " [Suite:k8s]", + + "[sig-storage] HostPathType File [Slow] Should be able to mount file 'afile' successfully when HostPathType is HostPathFile": " [Suite:k8s]", + + "[sig-storage] HostPathType File [Slow] Should be able to mount file 'afile' successfully when HostPathType is HostPathUnset": " [Suite:k8s]", + + "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathBlockDev": " [Suite:k8s]", + + "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathCharDev": " [Suite:k8s]", + + "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathDirectory": " [Suite:k8s]", + + "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathSocket": " [Suite:k8s]", + + "[sig-storage] HostPathType File [Slow] Should fail on mounting non-existent file 'does-not-exist-file' when HostPathType is HostPathFile": " [Suite:k8s]", + + "[sig-storage] HostPathType Socket [Slow] Should be able to mount socket 'asocket' successfully when HostPathType is HostPathSocket": " [Suite:k8s]", + + "[sig-storage] HostPathType Socket [Slow] Should be able to mount socket 'asocket' successfully when HostPathType is HostPathUnset": " [Suite:k8s]", + + "[sig-storage] 
HostPathType Socket [Slow] Should fail on mounting non-existent socket 'does-not-exist-socket' when HostPathType is HostPathSocket": " [Suite:k8s]", + + "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathBlockDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathCharDev": " [Suite:k8s]", + + "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathDirectory": " [Suite:k8s]", + + "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathFile": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] 
should concurrently access the single volume from pods on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Skipped:NoOptionalCapabilities] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy 
(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] 
[Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " 
[Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision 
storage with mount options": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " 
[Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: 
Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] 
subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] 
[Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: 
Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the 
kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (ntfs)] 
[Feature:Windows] volumes should allow exec of files on the volume": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should 
concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
+ "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet 
returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] 
[Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic 
PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same 
volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if 
subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumes should allow exec of 
files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node 
[LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different 
node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with 
snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with 
different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: 
Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: 
Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV 
(ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should 
concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] 
subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath 
readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral 
volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, 
validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct 
filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] 
provisioning should provision storage with pvc data source (ROX mode)": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV 
(default fs)] subPath should support non-existent path": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of 
files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] 
[Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting 
containers using directory as subpath [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: 
Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] 
[Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes 
[Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] 
[Serial] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod 
recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV 
(default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] 
[Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] 
[Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: ceph] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block 
volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single 
read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and 
retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod 
recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: 
Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
+ "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: 
Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic 
Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] 
[Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount 
if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem 
volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] 
[Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as 
subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV 
(ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV 
(filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV 
(ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] 
provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] 
[Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default 
fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: emptydir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the 
same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] 
[Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " 
[Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
hostPathSymlink] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is 
usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a 
volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] 
[Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume 
(default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] 
[LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume 
[Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath 
should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume 
[Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:Broken] [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:Broken] [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:Broken] [Serial] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Disabled:Broken] [Suite:k8s]",
+
+	"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default
fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should 
provision storage with pvc data source in parallel [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should 
support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] 
should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] 
Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] 
volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: 
Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Disabled:Broken] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " 
[Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV 
(block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted 
[LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] 
subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:Broken] [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: 
Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " 
[Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] 
[Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
block] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV 
(default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] 
[Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should 
concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: 
Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should 
support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
block] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumes should store data": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with 
the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] 
[Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume 
when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive 
[Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] 
[Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV 
(ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: 
Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext3)] volumes should store data": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly 
specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " 
[Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data 
source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] 
multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] 
[Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is 
down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode 
[Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block 
volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] 
provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV 
(default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain 
data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test 
that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits 
[Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the 
single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers 
using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: 
Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support 
expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] 
[Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV 
(default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod 
that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the 
single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is 
outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume 
mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] 
[Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] 
[Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should 
support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " 
[Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] 
[LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode 
[Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source 
[Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly 
volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode 
and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume 
when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should 
provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode 
and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is 
outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] 
[Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: 
Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion 
of pvcs without AllowVolumeExpansion property": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy 
(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage 
with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:gce] 
[Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is 
down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same 
node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with 
different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes 
should store data": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) 
(late-binding)] ephemeral should create read/write inline ephemeral volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumes should 
store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " 
[Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + 
+ "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data 
source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different 
fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] 
[Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail 
if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] 
[Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (xfs)] [Slow] 
volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount 
volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet 
returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] 
multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV 
(block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] 
provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while 
the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem 
volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV 
(ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same 
node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline 
ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support 
creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext4)] 
volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from 
pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] 
subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted 
while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " 
[Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] 
[Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree 
Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] 
subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " 
[Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] 
multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes 
[Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across 
pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] 
[Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: 
Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the 
same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) 
(late-binding)] ephemeral should support two pods which have the same volume definition": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " 
[Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (ext4)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume 
mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is 
outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is 
gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Disabled:Unsupported] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Disabled:Unsupported] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes 
should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: rbd] [Feature:Volumes] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + 
+ "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] 
subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes 
with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is 
readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": " [Disabled:Unsupported] 
[Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly]": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain 
data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] 
[Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + 
"[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext4)] volumes should store data": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned 
PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the 
volumeMount [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem 
volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS]": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] 
volumes should store data": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume": " [Suite:k8s]", + + "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data": " [Suite:k8s]", + + "[sig-storage] Mounted volume expand [Feature:StorageProvider] Should verify mounted devices can be resized": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Multi-AZ Cluster Volumes should schedule pods in the same zones as statically provisioned PVs": " [Disabled:Broken] [Suite:k8s]", + + "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kube-controller-manager restarts should delete a bound PVC from a clientPod, restart the kube-control-manager, and ensure the kube-controller-manager does not crash": " [Serial] [Suite:k8s]", + + "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kubelet restarts Should test that a file written to the mount before kubelet restart is readable after restart.": " [Serial] [Suite:k8s]", + + "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kubelet restarts Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kubelet restarts Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns.": " [Serial] [Suite:k8s]", + + "[sig-storage] Node Poweroff [Feature:vsphere] [Slow] [Disruptive] verify volume status after node power off": " [Disabled:Unsupported] [Serial] [Suite:k8s]", + + "[sig-storage] Node Unregister [Feature:vsphere] [Slow] [Disruptive] node unregister": " [Disabled:Unsupported] [Serial] [Suite:k8s]", + + "[sig-storage] PV Protection Verify \"immediate\" deletion of a PV that is not bound to a PVC": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PV Protection Verify that PV bound to a PVC is not removed immediately": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PVC Protection Verify \"immediate\" deletion of a PVC that is not in active use by a pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PVC Protection Verify that PVC in active use by a pod is not removed immediately": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PVC Protection Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Persistent Volume Claim and StorageClass Retroactive StorageClass assignment [Serial] [Disruptive] should assign default SC to PVCs that have no SC set": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes CSI Conformance should apply changes to a pv/pvc status [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] PersistentVolumes CSI Conformance should run through the lifecycle of a PV and a PVC [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] PersistentVolumes Default StorageClass [LinuxOnly] pods that use multiple volumes should be reschedulable [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes GCEPD 
[Feature:StorageProvider] should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes GCEPD [Feature:StorageProvider] should test that deleting the Namespace of a PVC and Pod causes the successful detach of Persistent Disk": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes GCEPD [Feature:StorageProvider] should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS when invoking the Recycle reclaim policy should test that a PV becomes Available and is clean after the PVC is deleted.": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV and a pre-bound PVC: test phase transition timestamp is set [Feature:PersistentVolumeLastPhaseTransitionTime]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV and a pre-bound PVC: test phase transition timestamp multiple updates [Feature:PersistentVolumeLastPhaseTransitionTime]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV and a pre-bound PVC: test write access": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV: test phase transition timestamp is set and phase is Available [Feature:PersistentVolumeLastPhaseTransitionTime]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PVC and a pre-bound PV: test write access": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PVC and non-pre-bound PV: test write access": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs should create a non-pre-bound PV and PVC: test write access ": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 2 PVs and 4 PVCs: test write access": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 3 PVs and 3 PVCs: test write access": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 4 PVs and 2 PVCs: test write access [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes [Feature:vsphere] [Feature:LabelSelector] Selector-Label Volume Binding:vsphere [Feature:vsphere] should bind volume with claim for given label": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] PersistentVolumes [Feature:vsphere] [Feature:ReclaimPolicy] persistentvolumereclaim:vsphere [Feature:vsphere] should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] PersistentVolumes [Feature:vsphere] [Feature:ReclaimPolicy] 
persistentvolumereclaim:vsphere [Feature:vsphere] should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] PersistentVolumes [Feature:vsphere] [Feature:ReclaimPolicy] persistentvolumereclaim:vsphere [Feature:vsphere] should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-expansion loopback local block volume should support online expansion on node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local Local volume that cannot be mounted [Slow] should fail due to non-existent path": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local Local volume that cannot be mounted [Slow] should fail due to wrong node": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local Pod with node different from PV's NodeAffinity should fail scheduling due to different NodeAffinity": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local Pod with node different from PV's NodeAffinity should fail scheduling due to different NodeSelector": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes on one node when pod has affinity": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes on one node when pod management is parallel and pod has affinity": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes spread across nodes when pod has anti-affinity": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes spread across nodes when pod management is parallel and pod has anti-affinity": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local Stress with local volumes [Serial] should be able to process many pods and reuse local volumes": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: block] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: block] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: block] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: block] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: block] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: block] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: block] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set different fsGroup 
for second pod if first pod is deleted [Flaky]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] 
PersistentVolumes-local [Volume type: dir-link] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: dir] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] One pod requesting one prebound PVC should be able to mount volume and write from pod1": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Skipped:gce] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] One pod requesting one prebound PVC should be able to mount volume and read from pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] One pod requesting one prebound PVC should be able to mount volume and write from 
pod1": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set fsGroup for one pod [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow]": " [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] PersistentVolumes:vsphere [Feature:vsphere] should test that a file written to the vsphere volume mount before kubelet restart can be read after restart [Disruptive]": " [Disabled:Unsupported] [Serial] [Suite:k8s]", + + "[sig-storage] PersistentVolumes:vsphere [Feature:vsphere] should test that a vsphere volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]": " [Disabled:Unsupported] [Serial] [Suite:k8s]", + + "[sig-storage] PersistentVolumes:vsphere [Feature:vsphere] should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] PersistentVolumes:vsphere [Feature:vsphere] should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] PersistentVolumes:vsphere [Feature:vsphere] should test that deleting the PV before the pod does not cause pod deletion to fail on vsphere volume detach": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Pod Disks [Feature:StorageProvider] [Serial] attach on previously attached volumes should work": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] Pod Disks [Feature:StorageProvider] detach in a disrupted environment [Slow] [Disruptive] when node's API object is deleted": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] Pod Disks [Feature:StorageProvider] detach in a disrupted environment [Slow] [Disruptive] when pod is evicted": " [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] Pod Disks [Feature:StorageProvider] schedule a pod w/ RW PD(s) mounted to 1 or more containers, write to PD, verify content, delete pod, and repeat in rapid succession [Slow] using 1 containers and 2 PDs": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] Pod Disks [Feature:StorageProvider] schedule a pod w/ RW PD(s) mounted to 1 or more containers, write to PD, verify content, delete pod, and repeat in rapid succession [Slow] using 4 containers and 1 PDs": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] Pod Disks [Feature:StorageProvider] schedule pods each with a PD, delete pod and verify detach [Slow] for RW PD with pod delete grace period of \"default (30s)\"": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] Pod Disks 
[Feature:StorageProvider] schedule pods each with a PD, delete pod and verify detach [Slow] for RW PD with pod delete grace period of \"immediate (0s)\"": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] Pod Disks [Feature:StorageProvider] schedule pods each with a PD, delete pod and verify detach [Slow] for read-only PD with pod delete grace period of \"default (30s)\"": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] Pod Disks [Feature:StorageProvider] schedule pods each with a PD, delete pod and verify detach [Slow] for read-only PD with pod delete grace period of \"immediate (0s)\"": " [Skipped:NoOptionalCapabilities] [Suite:k8s]", + + "[sig-storage] Pod Disks [Feature:StorageProvider] should be able to delete a non-existent PD without error": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Projected combined should project all components that make up the projection API [Projection] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap Should fail non-optional pod creation due to configMap object does not exist [Slow]": " [Suite:k8s]", + + "[sig-storage] Projected configMap Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]": " [Suite:k8s]", + + "[sig-storage] Projected configMap optional updates should be reflected in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Projected configMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]": " 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected configMap updates should be reflected in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide container's cpu limit [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide container's cpu request [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide container's memory limit [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide container's memory request [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should provide podname only [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should update annotations on modification [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected downwardAPI should update labels on modification [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected secret Should fail non-optional pod creation due to secret object does not exist [Slow]": " [Suite:k8s]", + + "[sig-storage] Projected secret Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]": " [Suite:k8s]", + + "[sig-storage] Projected secret optional updates should be reflected in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected secret should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Projected secret should be consumable from pods in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected secret 
should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected secret should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected secret should be consumable from pods in volume with mappings [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected secret should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Projected secret should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Regional PD RegionalPD should failover to a different zone when all nodes in one zone become unreachable [Slow] [Disruptive]": " [Serial] [Suite:k8s]", + + "[sig-storage] Regional PD RegionalPD should provision storage [Slow]": " [Suite:k8s]", + + "[sig-storage] Regional PD RegionalPD should provision storage in the allowedTopologies [Slow]": " [Suite:k8s]", + + "[sig-storage] Regional PD RegionalPD should provision storage in the allowedTopologies with delayed binding [Slow]": " [Suite:k8s]", + + "[sig-storage] Regional PD RegionalPD should provision storage with delayed binding [Slow]": " [Suite:k8s]", + + "[sig-storage] Secrets Should fail non-optional pod creation due to secret object does not exist [Slow]": " [Suite:k8s]", + + "[sig-storage] Secrets Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]": " [Suite:k8s]", + + "[sig-storage] Secrets optional updates should be reflected in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Secrets should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Secrets should be consumable from pods in volume [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Secrets should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Secrets should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Secrets should be consumable from pods in volume with mappings [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Secrets should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Secrets should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Secrets should be immutable if `immutable` field is set [Conformance]": " 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] StaticPods [Feature:Kind] should run after kubelet stopped with CSI volume mounted [Disruptive] [Serial]": " [Suite:k8s]", + + "[sig-storage] Storage Policy Based Volume Provisioning [Feature:vsphere] verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Storage Policy Based Volume Provisioning [Feature:vsphere] verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Storage Policy Based Volume Provisioning [Feature:vsphere] verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Storage Policy Based Volume Provisioning [Feature:vsphere] verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Storage Policy Based Volume Provisioning [Feature:vsphere] verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Storage Policy Based Volume Provisioning [Feature:vsphere] verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Storage Policy Based Volume Provisioning [Feature:vsphere] verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Storage Policy Based Volume Provisioning [Feature:vsphere] verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Storage Policy Based Volume Provisioning [Feature:vsphere] verify an existing and compatible SPBM policy is honored for dynamically provisioned pvc using storageclass": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Storage Policy Based Volume Provisioning [Feature:vsphere] verify an if a SPBM policy and VSAN capabilities cannot be honored for dynamically provisioned pvc using storageclass": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Storage Policy Based Volume Provisioning [Feature:vsphere] verify clean up of stale dummy VM for dynamically provisioned pvc using SPBM policy": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Storage Policy Based Volume Provisioning [Feature:vsphere] verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Storage Policy Based Volume Provisioning [Feature:vsphere] verify if a non-existing SPBM policy is not honored for dynamically provisioned pvc using storageclass": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] StorageClasses CSI Conformance should run through the lifecycle of a StorageClass 
[Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod with mountPath of existing file [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Subpath Atomic writer volumes should support subpaths with downward pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Subpath Atomic writer volumes should support subpaths with projected pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Subpath Atomic writer volumes should support subpaths with secret pod [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-storage] Subpath Container restart should verify that container can restart successfully after configmaps modified": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Verify Volume Attach Through vpxd Restart [Feature:vsphere] [Serial] [Disruptive] verify volume remains attached through vpxd restart": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume Attach Verify [Feature:vsphere] [Serial] [Disruptive] verify volume remains attached after master kubelet restart": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume Disk Format [Feature:vsphere] verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume Disk Format [Feature:vsphere] verify disk format type - thin is honored for dynamically provisioned pv using storageclass": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume Disk Format [Feature:vsphere] verify disk format type - zeroedthick is honored for dynamically provisioned pv using storageclass": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume Disk Size [Feature:vsphere] verify dynamically provisioned pv has size rounded up correctly": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume FStype [Feature:vsphere] verify fstype - default value should be ext4": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume FStype [Feature:vsphere] verify fstype - ext3 formatted volume": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume FStype [Feature:vsphere] verify invalid fstype": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume Operations Storm [Feature:vsphere] should create pod with many volumes and verify no attach call fails": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume Placement [Feature:vsphere] should create and delete pod with multiple volumes from different datastore": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume Placement [Feature:vsphere] should create and delete pod with multiple volumes from same datastore": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume Placement [Feature:vsphere] should create and delete pod with the same volume source attach/detach to different worker nodes": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume Placement [Feature:vsphere] should create and delete pod with the same volume source on the same worker node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume 
Placement [Feature:vsphere] test back to back pod creation and deletion with different volume sources on the same worker node": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume Provisioning On Clustered Datastore [Feature:vsphere] verify dynamic provision with default parameter on clustered datastore": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume Provisioning On Clustered Datastore [Feature:vsphere] verify dynamic provision with spbm policy on clustered datastore": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume Provisioning On Clustered Datastore [Feature:vsphere] verify static provisioning on clustered datastore": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volume Provisioning on Datastore [Feature:vsphere] verify dynamically provisioned pv using storageclass fails on an invalid datastore": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Volumes ConfigMap should be mountable": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Volumes NFSv3 should be mountable for NFSv3": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Volumes NFSv4 should be mountable for NFSv4": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify PVC creation fails if no zones are specified in the storage class (No shared datastores exist among all the nodes)": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify PVC creation fails if only datastore is specified in the storage class (No shared datastores exist among all the nodes)": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify PVC creation fails if only storage policy is specified in the storage class (No shared datastores exist among all the nodes)": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify PVC creation fails if the availability zone specified in the storage class have no shared datastores under it.": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify PVC creation with an invalid VSAN capability along with a compatible zone combination specified in storage class fails": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify PVC creation with compatible policy and datastore without any zones specified in the storage class fails (No shared datastores exist among all the nodes)": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify PVC creation with incompatible datastore and zone combination specified in storage class fails": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify PVC creation with incompatible zone along with compatible storagePolicy and datastore combination specified in storage class fails": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify PVC creation with invalid zone specified in storage 
class fails": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify a PVC creation fails when multiple zones are specified in the storage class without shared datastores among the zones in waitForFirstConsumer binding mode": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify a pod fails to get scheduled when conflicting volume topology (allowedTopologies) and pod scheduling constraints(nodeSelector) are specified": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with allowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with multiple allowedTopologies": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created and attached to a dynamically created PV, based on a VSAN capability, datastore and compatible zone specified in storage class": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in the storage class. 
(No shared datastores exist among both zones)": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class when there are multiple datastores with the same name under different zones across datacenters": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created and attached to a dynamically created PV, based on the allowed zones specified in storage class when the datastore under the zone is present in another datacenter": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created and attached to a dynamically created PV, based on the allowed zones, datastore and storage policy specified in storage class": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] Zone Support [Feature:vsphere] Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] [Feature:Flexvolumes] Detaching volumes should not work when mount is in progress [Slow]": " [Suite:k8s]", + + "[sig-storage] [Feature:Flexvolumes] Mounted flexvolume expand [Slow] Should verify mounted flex volumes can be resized": " [Suite:k8s]", + + "[sig-storage] [Feature:Flexvolumes] Mounted flexvolume volume expand [Slow] should be resizable when mounted": " [Suite:k8s]", + + "[sig-storage] [Feature:NodeOutOfServiceVolumeDetach] [Disruptive] [LinuxOnly] NonGracefulNodeShutdown [NonGracefulNodeShutdown] pod that uses a persistent volume via gce pd driver should get immediately rescheduled to a different node after non graceful node shutdown ": " [Serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics Ephemeral should create metrics for total number of volumes in A/D Controller": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics Ephemeral should create metrics for total time taken in volume operations in P/V Controller": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics Ephemeral should create prometheus metrics for volume provisioning and attach/detach": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics Ephemeral should create prometheus metrics for volume provisioning errors [Slow]": " [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics Ephemeral should create volume metrics in Volume Manager": " 
[Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics Ephemeral should create volume metrics with the correct BlockMode PVC ref": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics Ephemeral should create volume metrics with the correct FilesystemMode PVC ref": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVC should create metrics for total number of volumes in A/D Controller": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVC should create metrics for total time taken in volume operations in P/V Controller": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVC should create prometheus metrics for volume provisioning and attach/detach": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVC should create prometheus metrics for volume provisioning errors [Slow]": " [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVC should create volume metrics in Volume Manager": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVC should create volume metrics with the correct BlockMode PVC ref": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVC should create volume metrics with the correct FilesystemMode PVC ref": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVController should create bound pv/pvc count metrics for pvc controller after creating both pv and pvc": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVController should create none metrics for pvc controller before creating any PV or PVC": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVController should create total pv count metrics for with plugin and volume mode labels after creating pv": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVController should create unbound pv count metrics for pvc controller after creating pv only": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] [Serial] Volume metrics PVController should create unbound pvc count metrics for pvc controller after creating pvc only": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-storage] vcp at scale [Feature:vsphere] vsphere scale tests": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage] vcp-performance [Feature:vsphere] vcp performance tests": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-windows] Hybrid cluster network for all supported CNIs should have stable networking for Linux and Windows pods": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] Hybrid cluster network for all supported CNIs should provide Internet connection for Linux containers using DNS [Feature:Networking-DNS]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] Hybrid cluster network for all supported CNIs should provide Internet connection for Windows containers using DNS [Feature:Networking-DNS]": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] Services should be able to create a functioning NodePort service for Windows": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] 
[Feature:GPUDevicePlugin] Device Plugin should be able to create a functioning device plugin for Windows": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers container command path validation": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers container stats validation": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers metrics should report count of started and failed to start HostProcess containers": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should run as a process on the host/node": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should run as localgroup accounts": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should support init containers": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should support querying api-server using in-cluster config": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should support various volume mount types": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:WindowsHyperVContainers] HyperV containers should start a hyperv isolated container": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Cpu Resources [Serial] Container limits should not be exceeded after waiting 2 minutes": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] DNS should support configurable pod DNS servers": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Density [Serial] [Slow] create a batch of pods latency/resource should be within limit when create 10 pods with 0s interval": " [Suite:k8s]", + + "[sig-windows] [Feature:Windows] GMSA Full [Serial] [Slow] GMSA support can read and write file to remote SMB folder": " [Suite:k8s]", + + "[sig-windows] [Feature:Windows] GMSA Full [Serial] [Slow] GMSA support works end to end": " [Suite:k8s]", + + "[sig-windows] [Feature:Windows] GMSA Kubelet [Slow] kubelet GMSA support when creating a pod with correct GMSA credential specs passes the credential specs down to the Pod's containers": " [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Kubelet-Stats Kubelet stats collection for Windows nodes when running 3 pods should return within 10 seconds": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Kubelet-Stats Kubelet stats collection for Windows nodes when windows is booted should return bootid within 10 seconds": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Kubelet-Stats [Serial] Kubelet stats collection for Windows nodes when running 10 pods should return within 10 seconds": " 
[Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Memory Limits [Serial] [Slow] Allocatable node memory should be equal to a calculated allocatable memory value": " [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Memory Limits [Serial] [Slow] attempt to deploy past allocatable memory limits should fail deployments of pods once there isn't enough memory": " [Suite:k8s]", + + "[sig-windows] [Feature:Windows] SecurityContext should be able create pods and run containers with a given username": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] SecurityContext should ignore Linux Specific SecurityContext if set": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with containers running as CONTAINERADMINISTRATOR when runAsNonRoot is true": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with containers running as ContainerAdministrator when runAsNonRoot is true": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with unknown usernames at Container level": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with unknown usernames at Pod level": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] SecurityContext should override SecurityContext username if set": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Windows volume mounts check volume mount permissions container should have readOnly permissions on emptyDir": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] Windows volume mounts check volume mount permissions container should have readOnly permissions on hostMapPath": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-windows] [Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletVersion:1.22] RebootHost containers [Serial] [Disruptive] [Slow] should run as a reboot process on the host/node": " [Suite:k8s]", +} + +func init() { + ginkgo.GetSuite().SetAnnotateFn(func(name string, node types.TestSpec) { + if newLabels, ok := Annotations[name]; ok { + node.AppendText(newLabels) + } else { + panic(fmt.Sprintf("unable to find test %s", name)) + } + }) +} diff --git a/openshift-hack/e2e/annotate/rules.go b/openshift-hack/e2e/annotate/rules.go new file mode 100644 index 0000000000000..27f16410e5fe1 --- /dev/null +++ b/openshift-hack/e2e/annotate/rules.go @@ -0,0 +1,442 @@ +package annotate + +import ( + // ensure all the ginkgo tests are loaded + _ "k8s.io/kubernetes/openshift-hack/e2e" +) + +var ( + TestMaps = map[string][]string{ + // alpha features that are not gated + "[Disabled:Alpha]": { + `\[Feature:StorageVersionAPI\]`, + `\[Feature:InPlacePodVerticalScaling\]`, + `\[Feature:ServiceCIDRs\]`, + `\[Feature:ClusterTrustBundle\]`, + `\[Feature:SELinuxMount\]`, + `\[FeatureGate:SELinuxMount\]`, + `\[Feature:UserNamespacesPodSecurityStandards\]`, + `\[Feature:UserNamespacesSupport\]`, // disabled Beta + `\[Feature:DynamicResourceAllocation\]`, + `\[Feature:VolumeAttributesClass\]`, // disabled Beta + `\[sig-cli\] Kubectl client Kubectl prune with applyset should apply and prune objects`, // Alpha feature since 
k8s 1.27 + // 4.19 + `\[Feature:PodLevelResources\]`, + `\[Feature:SchedulerAsyncPreemption\]`, + `\[Feature:RelaxedDNSSearchValidation\]`, + `\[Feature:PodLogsQuerySplitStreams\]`, + `\[Feature:PodLifecycleSleepActionAllowZero\]`, + }, + // tests for features that are not implemented in openshift + "[Disabled:Unimplemented]": { + `Monitoring`, // Not installed, should be + `Cluster level logging`, // Not installed yet + `Kibana`, // Not installed + `Ubernetes`, // Can't set zone labels today + `kube-ui`, // Not installed by default + `Kubernetes Dashboard`, // Not installed by default (also probably slow image pull) + `should proxy to cadvisor`, // we don't expose cAdvisor port directly for security reasons + `\[Feature:BootstrapTokens\]`, // we don't serve cluster-info configmap + `\[Feature:KubeProxyDaemonSetMigration\]`, // upgrades are run separately + `\[Feature:BoundServiceAccountTokenVolume\]`, // upgrades are run separately + `\[Feature:StatefulUpgrade\]`, // upgrades are run separately + }, + // tests that rely on special configuration that we do not yet support + "[Disabled:SpecialConfig]": { + // GPU node needs to be available + `\[Feature:GPUDevicePlugin\]`, + `\[sig-scheduling\] GPUDevicePluginAcrossRecreate \[Feature:Recreate\]`, + + `\[Feature:LocalStorageCapacityIsolation\]`, // relies on a separate daemonset? + `\[sig-cloud-provider-gcp\]`, // these tests require a different configuration - note that GCE tests from sig-cluster-lifecycle were moved to sig-cloud-provider-gcp; see https://github.com/kubernetes/kubernetes/commit/0b3d50b6dccdc4bbd0b3e411c648b092477d79ac#diff-3b1910d08fb8fd8b32956b5e264f87cb + + `kube-dns-autoscaler`, // Don't run kube-dns + `should check if Kubernetes master services is included in cluster-info`, // Don't run kube-dns + `DNS configMap`, // this tests dns federation configuration via configmap, which we don't support yet + + `NodeProblemDetector`, // requires a non-master node to run on + `Advanced Audit should audit API calls`, // expects to be able to call /logs + + `Firewall rule should have correct firewall rules for e2e cluster`, // Upstream-install specific + + // https://bugzilla.redhat.com/show_bug.cgi?id=2079958 + `\[sig-network\] \[Feature:Topology Hints\] should distribute endpoints evenly`, + + // Tests require SSH configuration and are part of the parallel suite, which does not create the bastion + // host. Enabling the test would result in the bastion being created for every parallel test execution. + // Given that we have existing oc and WMCO tests that cover this functionality, we can safely disable it.
+ `\[Feature:NodeLogQuery\]`, + }, + // tests that are known broken and need to be fixed upstream or in openshift + // always add an issue here + "[Disabled:Broken]": { + `mount an API token into pods`, // We add 6 secrets, not 1 + `ServiceAccounts should ensure a single API token exists`, // We create lots of secrets + `unchanging, static URL paths for kubernetes api services`, // the test needs to exclude URLs that are not part of conformance (/logs) + `Services should be able to up and down services`, // we don't have wget installed on nodes + `KubeProxy should set TCP CLOSE_WAIT timeout`, // the test requires communication to port 11302 on the cluster nodes + `should check kube-proxy urls`, // previously this test was skipped because we reported -1 as the number of nodes; now we report the proper number and the test fails + `SSH`, // TRIAGE + `should implement service.kubernetes.io/service-proxy-name`, // this is an optional test that requires SSH. sig-network + `recreate nodes and ensure they function upon restart`, // https://bugzilla.redhat.com/show_bug.cgi?id=1756428 + `\[Driver: iscsi\]`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711627 + + "RuntimeClass should reject", + + `Services should implement service.kubernetes.io/headless`, // requires SSH access to function, needs to be refactored + `ClusterDns \[Feature:Example\] should create pod that uses dns`, // doesn't use bindata, not part of kube test binary + `Simple pod should return command exit codes should handle in-cluster config`, // kubectl cp doesn't work or is not preserving executable bit, we have this test already + + // TODO(node): configure the cri handler for the runtime class to make this work + "should run a Pod requesting a RuntimeClass with a configured handler", + "should reject a Pod requesting a RuntimeClass with conflicting node selector", + "should run a Pod requesting a RuntimeClass with scheduling", + + // A fix is in progress: https://github.com/openshift/origin/pull/24709 + `Multi-AZ Clusters should spread the pods of a replication controller across zones`, + + // Upstream assumes all control plane pods are in the kube-system namespace, and we should revert the change + // https://github.com/kubernetes/kubernetes/commit/176c8e219f4c7b4c15d34b92c50bfa5ba02b3aba#diff-28a3131f96324063dd53e17270d435a3b0b3bd8f806ee0e33295929570eab209R78 + "MetricsGrabber should grab all metrics from a Kubelet", + "MetricsGrabber should grab all metrics from API server", + "MetricsGrabber should grab all metrics from a ControllerManager", + "MetricsGrabber should grab all metrics from a Scheduler", + + // https://bugzilla.redhat.com/show_bug.cgi?id=1906808 + `ServiceAccounts should support OIDC discovery of service account issuer`, + + // NFS umount is broken in kernels 5.7+ + // https://bugzilla.redhat.com/show_bug.cgi?id=1854379 + `\[sig-storage\].*\[Driver: nfs\] \[Testpattern: Dynamic PV \(default fs\)\].*subPath should be able to unmount after the subpath directory is deleted`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1986306 + `\[sig-cli\] Kubectl client kubectl wait should ignore not found error with --for=delete`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1980141 + `Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector`, + `Netpol NetworkPolicy between server and client should enforce policy to allow traffic from pods within server namespace based on PodSelector`, + `Netpol NetworkPolicy between
server and client should enforce policy based on NamespaceSelector with MatchExpressions`, + `Netpol NetworkPolicy between server and client should enforce policy based on PodSelector with MatchExpressions`, + `Netpol NetworkPolicy between server and client should enforce policy based on PodSelector or NamespaceSelector`, + `Netpol NetworkPolicy between server and client should deny ingress from pods on other namespaces`, + `Netpol NetworkPolicy between server and client should enforce updated policy`, + `Netpol NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors`, + `Netpol NetworkPolicy between server and client should enforce policy based on any PodSelectors`, + `Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector`, + `Netpol \[LinuxOnly\] NetworkPolicy between server and client using UDP should support a 'default-deny-ingress' policy`, + `Netpol \[LinuxOnly\] NetworkPolicy between server and client using UDP should enforce policy based on Ports`, + `Netpol \[LinuxOnly\] NetworkPolicy between server and client using UDP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector`, + + `Topology Hints should distribute endpoints evenly`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1908645 + `\[sig-network\] Networking Granular Checks: Services should function for service endpoints using hostNetwork`, + `\[sig-network\] Networking Granular Checks: Services should function for pod-Service\(hostNetwork\)`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1952460 + `\[sig-network\] Firewall rule control plane should not expose well-known ports`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1988272 + `\[sig-network\] Networking should provide Internet connection for containers \[Feature:Networking-IPv6\]`, + `\[sig-network\] Networking should provider Internet connection for containers using DNS`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1957894 + `\[sig-node\] Container Runtime blackbox test when running a container with a new image should be able to pull from private registry with secret`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1952457 + `\[sig-node\] crictl should be able to run crictl on the node`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1953478 + `\[sig-storage\] Dynamic Provisioning Invalid AWS KMS key should report an error and create no PV`, + + // https://issues.redhat.com/browse/OCPBUGS-34577 + `\[sig-storage\] Multi-AZ Cluster Volumes should schedule pods in the same zones as statically provisioned PVs`, + + // https://issues.redhat.com/browse/OCPBUGS-34594 + `\[sig-node\] \[Feature:PodLifecycleSleepAction\] when create a pod with lifecycle hook using sleep action valid prestop hook using sleep action`, + + // https://issues.redhat.com/browse/OCPBUGS-38839 + `\[sig-network\] \[Feature:Traffic Distribution\] when Service has trafficDistribution=PreferClose should route traffic to an endpoint that is close to the client`, + }, + // tests that need to be temporarily disabled while the rebase is in progress. 
+ "[Disabled:RebaseInProgress]": { + // https://issues.redhat.com/browse/OCPBUGS-7297 + `DNS HostNetwork should resolve DNS of partial qualified names for services on hostNetwork pods with dnsPolicy`, + + // https://issues.redhat.com/browse/OCPBUGS-45275 + `\[sig-network\] Connectivity Pod Lifecycle should be able to connect to other Pod from a terminating Pod`, + + // https://issues.redhat.com/browse/OCPBUGS-17194 + `\[sig-node\] ImageCredentialProvider \[Feature:KubeletCredentialProviders\] should be able to create pod with image credentials fetched from external credential provider`, + + // https://issues.redhat.com/browse/OCPBUGS-45214 + // Even though this feature is not GA in k/k, it will be GA in OCP 4.19, so we should fix it and unskip this test + `\[Feature:volumegroupsnapshot\]`, + + // https://issues.redhat.com/browse/OCPBUGS-45273 + `\[sig-network\] Services should implement NodePort and HealthCheckNodePort correctly when ExternalTrafficPolicy changes`, + + // https://issues.redhat.com/browse/OCPBUGS-45273 + `\[sig-cli\] Kubectl Port forwarding Shutdown client connection while the remote stream is writing data to the port-forward connection port-forward should keep working after detect broken connection`, + + // https://issues.redhat.com/browse/OCPBUGS-45274 + // https://github.com/kubernetes/kubernetes/issues/129056 + `\[sig-node\] PodRejectionStatus Kubelet should reject pod when the node didn't have enough resource`, + + // https://issues.redhat.com/browse/OCPBUGS-45359 + `\[Feature:RecoverVolumeExpansionFailure\]`, + + // https://issues.redhat.com/browse/OCPBUGS-46477 + `\[sig-storage\] In-tree Volumes \[Driver: azure-file\]`, + }, + // tests that may work, but we don't support them + "[Disabled:Unsupported]": { + `\[Driver: rbd\]`, // OpenShift 4.x does not support Ceph RBD (use CSI instead) + `\[Driver: ceph\]`, // OpenShift 4.x does not support CephFS (use CSI instead) + `\[Driver: gluster\]`, // OpenShift 4.x does not support Gluster + `Volumes GlusterFS`, // OpenShift 4.x does not support Gluster + `GlusterDynamicProvisioner`, // OpenShift 4.x does not support Gluster + + // Skip vSphere-specific storage tests. The standard in-tree storage tests for vSphere + // (prefixed with `In-tree Volumes [Driver: vsphere]`) are enough for testing this plugin. 
+ // https://bugzilla.redhat.com/show_bug.cgi?id=2019115 + `\[sig-storage\].*\[Feature:vsphere\]`, + // Also, our CI doesn't support topology, so disable those tests + `\[sig-storage\] In-tree Volumes \[Driver: vsphere\] \[Testpattern: Dynamic PV \(delayed binding\)\] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies`, + `\[sig-storage\] In-tree Volumes \[Driver: vsphere\] \[Testpattern: Dynamic PV \(delayed binding\)\] topology should provision a volume and schedule a pod with AllowedTopologies`, + `\[sig-storage\] In-tree Volumes \[Driver: vsphere\] \[Testpattern: Dynamic PV \(immediate binding\)\] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies`, + `\[sig-storage\] In-tree Volumes \[Driver: vsphere\] \[Testpattern: Dynamic PV \(immediate binding\)\] topology should provision a volume and schedule a pod with AllowedTopologies`, + }, + // tests too slow to be part of conformance + "[Slow]": { + `\[sig-scalability\]`, // disable from the default set for now + `should create and stop a working application`, // Inordinately slow tests + + `\[Feature:PerformanceDNS\]`, // very slow + + `validates that there exists conflict between pods with same hostPort and protocol but one using 0\.0\.0\.0 hostIP`, // 5m, really? + }, + // tests that are known flaky + "[Flaky]": { + `Job should run a job to completion when tasks sometimes fail and are not locally restarted`, // seems flaky, also may require too many resources + // TODO(node): test works when run alone, but not in the suite in CI + `\[Feature:HPA\] Horizontal pod autoscaling \(scale resource: CPU\) \[sig-autoscaling\] ReplicationController light Should scale from 1 pod to 2 pods`, + }, + // tests that must be run without competition + "[Serial]": { + `\[Disruptive\]`, + `\[Feature:Performance\]`, // requires isolation + + `Service endpoints latency`, // requires low latency + `Clean up pods on node`, // schedules up to max pods per node + `DynamicProvisioner should test that deleting a claim before the volume is provisioned deletes the volume`, // test is very disruptive to other tests + + `Should be able to support the 1\.7 Sample API Server using the current Aggregator`, // down apiservices break other clients today https://bugzilla.redhat.com/show_bug.cgi?id=1623195 + + `\[Feature:HPA\] Horizontal pod autoscaling \(scale resource: CPU\) \[sig-autoscaling\] ReplicationController light Should scale from 1 pod to 2 pods`, + + `should prevent Ingress creation if more than 1 IngressClass marked as default`, // https://bugzilla.redhat.com/show_bug.cgi?id=1822286 + + `\[sig-network\] IngressClass \[Feature:Ingress\] should set default value on new IngressClass`, //https://bugzilla.redhat.com/show_bug.cgi?id=1833583 + }, + // Tests that don't pass on disconnected, either due to requiring + // internet access for GitHub (e.g. 
many of the s2i builds), or + // because of pullthrough not supporting ICSP (https://bugzilla.redhat.com/show_bug.cgi?id=1918376) + "[Skipped:Disconnected]": { + // Internet access required + `\[sig-network\] Networking should provide Internet connection for containers`, + }, + "[Skipped:alibabacloud]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[Feature:LoadBalancer\]`, + }, + "[Skipped:aws]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[sig-network\] LoadBalancers \[Feature:LoadBalancer\] .* UDP`, + `\[sig-network\] LoadBalancers \[Feature:LoadBalancer\] .* session affinity`, + }, + "[Skipped:azure]": { + "Networking should provide Internet connection for containers", // Azure does not allow ICMP traffic to internet. + // Azure CSI migration changed how we treat regions without zones. + // See https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=2066865 + `\[sig-storage\] In-tree Volumes \[Driver: azure-disk\] \[Testpattern: Dynamic PV \(immediate binding\)\] topology should provision a volume and schedule a pod with AllowedTopologies`, + `\[sig-storage\] In-tree Volumes \[Driver: azure-disk\] \[Testpattern: Dynamic PV \(delayed binding\)\] topology should provision a volume and schedule a pod with AllowedTopologies`, + }, + "[Skipped:baremetal]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[Feature:LoadBalancer\]`, + }, + "[Skipped:gce]": { + // Requires creation of a different compute instance in a different zone and is not compatible with volumeBindingMode of WaitForFirstConsumer which we use in 4.x + `\[sig-storage\] Multi-AZ Cluster Volumes should only be allowed to provision PDs in zones where nodes exist`, + + // The following tests try to ssh directly to a node. 
None of our nodes have external IPs + `\[k8s.io\] \[sig-node\] crictl should be able to run crictl on the node`, + `\[sig-storage\] Flexvolumes should be mountable`, + `\[sig-storage\] Detaching volumes should not work when mount is in progress`, + + // We are using ovn-kubernetes to conceal metadata + `\[sig-auth\] Metadata Concealment should run a check-metadata-concealment job to completion`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1740959 + `\[sig-api-machinery\] AdmissionWebhook should be able to deny pod and configmap creation`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1745720 + `\[sig-storage\] CSI Volumes \[Driver: pd.csi.storage.gke.io\]`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1749882 + `\[sig-storage\] CSI Volumes CSI Topology test using GCE PD driver \[Serial\]`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1751367 + `gce-localssd-scsi-fs`, + + // https://bugzilla.redhat.com/show_bug.cgi?id=1750851 + // should be serial if/when it's re-enabled + `\[HPA\] Horizontal pod autoscaling \(scale resource: Custom Metrics from Stackdriver\)`, + `\[Feature:CustomMetricsAutoscaling\]`, + }, + "[Skipped:ibmcloud]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[Feature:LoadBalancer\]`, + }, + "[Skipped:kubevirt]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[Feature:LoadBalancer\]`, + }, + "[Skipped:nutanix]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[Feature:LoadBalancer\]`, + }, + "[Skipped:openstack]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[Feature:LoadBalancer\]`, + }, + "[Skipped:ovirt]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[Feature:LoadBalancer\]`, + }, + "[Skipped:vsphere]": { + // LoadBalancer tests in 1.31 require explicit platform-specific skips + // https://issues.redhat.com/browse/OCPBUGS-38840 + `\[Feature:LoadBalancer\]`, + }, + + "[sig-node]": { + `\[NodeConformance\]`, + `NodeLease`, + `lease API`, + `\[NodeFeature`, + `\[NodeAlphaFeature`, + `Probing container`, + `Security Context When creating a`, + `Downward API should create a pod that prints his name and namespace`, + `Liveness liveness pods should be automatically restarted`, + `Secret should create a pod that reads a secret`, + `Pods should delete a collection of pods`, + `Pods should run through the lifecycle of Pods and PodStatus`, + }, + "[sig-cluster-lifecycle]": { + `Feature:ClusterAutoscalerScalability`, + `recreate nodes and ensure they function`, + }, + "[sig-arch]": { + // not run, assigned to arch as catch-all + `\[Feature:GKELocalSSD\]`, + `\[Feature:GKENodePool\]`, + }, + + // These tests are skipped when openshift-tests needs to use a proxy to reach the + // cluster -- either because the test won't work while proxied, or because the test + // itself is testing functionality using its own proxy. + "[Skipped:Proxy]": { + // These tests set up their own proxy, which won't work when we need to access the + // cluster through a proxy.
+ `\[sig-cli\] Kubectl client Simple pod should support exec through an HTTP proxy`, + `\[sig-cli\] Kubectl client Simple pod should support exec through kubectl proxy`, + + // Kube currently uses the x/net/websockets pkg, which doesn't work with proxies. + // See: https://github.com/kubernetes/kubernetes/pull/103595 + `\[sig-node\] Pods should support retrieving logs from the container over websockets`, + `\[sig-cli\] Kubectl Port forwarding With a server listening on localhost should support forwarding over websockets`, + `\[sig-cli\] Kubectl Port forwarding With a server listening on 0.0.0.0 should support forwarding over websockets`, + `\[sig-node\] Pods should support remote command execution over websockets`, + + // These tests are flaky and require internet access + // See https://bugzilla.redhat.com/show_bug.cgi?id=2019375 + `\[sig-network\] DNS should resolve DNS of partial qualified names for services`, + `\[sig-network\] DNS should provide DNS for the cluster`, + // This test does not work when accessing the cluster through a proxy, see https://bugzilla.redhat.com/show_bug.cgi?id=2084560 + `\[sig-network\] Networking should provide Internet connection for containers`, + }, + + "[Skipped:SingleReplicaTopology]": { + `\[sig-apps\] Daemon set \[Serial\] should rollback without unnecessary restarts \[Conformance\]`, + `\[sig-node\] NoExecuteTaintManager Single Pod \[Serial\] doesn't evict pod with tolerations from tainted nodes`, + `\[sig-node\] NoExecuteTaintManager Single Pod \[Serial\] eventually evict pod with finite tolerations from tainted nodes`, + `\[sig-node\] NoExecuteTaintManager Single Pod \[Serial\] evicts pods from tainted nodes`, + `\[sig-node\] NoExecuteTaintManager Single Pod \[Serial\] removing taint cancels eviction \[Disruptive\] \[Conformance\]`, + `\[sig-node\] NoExecuteTaintManager Single Pod \[Serial\] pods evicted from tainted nodes have pod disruption condition`, + `\[sig-node\] NoExecuteTaintManager Multiple Pods \[Serial\] evicts pods with minTolerationSeconds \[Disruptive\] \[Conformance\]`, + `\[sig-node\] NoExecuteTaintManager Multiple Pods \[Serial\] only evicts pods without tolerations from tainted nodes`, + `\[sig-cli\] Kubectl client Kubectl taint \[Serial\] should remove all the taints with the same key off a node`, + `\[sig-network\] LoadBalancers should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on different nodes`, + `\[sig-network\] LoadBalancers should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on the same nodes`, + `\[sig-architecture\] Conformance Tests should have at least two untainted nodes`, + }, + + // Tests which can't be run/don't make sense to run against a cluster with all optional capabilities disabled + "[Skipped:NoOptionalCapabilities]": { + // Requires CSISnapshot capability + `\[Feature:VolumeSnapshotDataSource\]`, + // Requires Storage capability + `\[Driver: aws\]`, + `\[Feature:StorageProvider\]`, + }, + + // tests that don't pass under OVN Kubernetes + "[Skipped:Network/OVNKubernetes]": { + // ovn-kubernetes does not support named ports + `NetworkPolicy.*named port`, + }, + + "[Skipped:ibmroks]": { + // Calico is allowing the request to time out instead of returning 'REFUSED' + // https://bugzilla.redhat.com/show_bug.cgi?id=1825021 - ROKS: calico SDN results in a request timeout when accessing services with no endpoints + `\[sig-network\] Services should be rejected when no endpoints exist`, + + // Nodes in ROKS have access to secrets in the cluster to handle
encryption + // https://bugzilla.redhat.com/show_bug.cgi?id=1825013 - ROKS: worker nodes have access to secrets in the cluster + `\[sig-auth\] \[Feature:NodeAuthorizer\] Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error`, + `\[sig-auth\] \[Feature:NodeAuthorizer\] Getting a non-existent secret should exit with the Forbidden error, not a NotFound error`, + `\[sig-auth\] \[Feature:NodeAuthorizer\] Getting a secret for a workload the node has access to should succeed`, + `\[sig-auth\] \[Feature:NodeAuthorizer\] Getting an existing configmap should exit with the Forbidden error`, + `\[sig-auth\] \[Feature:NodeAuthorizer\] Getting an existing secret should exit with the Forbidden error`, + + // Access to node external address is blocked from pods within a ROKS cluster by Calico + // https://bugzilla.redhat.com/show_bug.cgi?id=1825016 - e2e: NodeAuthenticator tests use both external and internal addresses for node + `\[sig-auth\] \[Feature:NodeAuthenticator\] The kubelet's main port 10250 should reject requests with no credentials`, + `\[sig-auth\] \[Feature:NodeAuthenticator\] The kubelet can delegate ServiceAccount tokens to the API server`, + + // Mode returned by RHEL7 worker contains an extra character not expected by the test: dgtrwx vs dtrwx + // https://bugzilla.redhat.com/show_bug.cgi?id=1825024 - e2e: Failing test - HostPath should give a volume the correct mode + `\[sig-storage\] HostPath should give a volume the correct mode`, + }, + } + + ExcludedTests = []string{ + `\[Disabled:`, + `\[Disruptive\]`, + `\[Skipped\]`, + `\[Slow\]`, + `\[Flaky\]`, + `\[Local\]`, + } +) diff --git a/openshift-hack/e2e/annotate/rules_test.go b/openshift-hack/e2e/annotate/rules_test.go new file mode 100644 index 0000000000000..6a413a2ef92ac --- /dev/null +++ b/openshift-hack/e2e/annotate/rules_test.go @@ -0,0 +1,92 @@ +package annotate + +import ( + "testing" + + "github.com/onsi/ginkgo/v2/types" +) + +type testNode struct { + text string +} + +func (n *testNode) CodeLocations() []types.CodeLocation { + return []types.CodeLocation{{FileName: "k8s.io/kubernetes"}} +} + +func (n *testNode) Text() string { + return n.text +} + +func (n *testNode) AppendText(text string) { + n.text += text +} + +func (n *testNode) Labels() []string { + return nil +} + +func TestStockRules(t *testing.T) { + tests := []struct { + name string + + testName string + + expectedLabel string + expectedText string + }{ + { + name: "simple serial match", + testName: "[Serial] test", + expectedLabel: " [Suite:openshift/conformance/serial]", + expectedText: "[Serial] test [Suite:openshift/conformance/serial]", + }, + { + name: "don't tag skipped", + testName: `[Serial] example test [Skipped:gce]`, + expectedLabel: ` [Suite:openshift/conformance/serial]`, + expectedText: `[Serial] example test [Skipped:gce] [Suite:openshift/conformance/serial]`, // notice that this isn't categorized into any of our buckets + }, + { + name: "not skipped", + testName: `[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]`, + expectedLabel: ` [Suite:openshift/conformance/parallel/minimal]`, + expectedText: `[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal]`, + }, + { + name: "should skip localssd on gce", + testName: `[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted`, + expectedLabel: ` [Skipped:gce] [Suite:openshift/conformance/serial]`, + expectedText: `[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [Skipped:gce] [Suite:openshift/conformance/serial]`, // notice that this isn't categorized into any of our buckets + }, + { + name: "should skip NetworkPolicy tests on multitenant", + testName: `should do something with NetworkPolicy`, + expectedLabel: ` [Suite:openshift/conformance/parallel]`, + expectedText: `should do something with NetworkPolicy [Suite:openshift/conformance/parallel]`, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testRenamer := newGenerator(TestMaps) + testNode := &testNode{ + text: test.testName, + } + + testRenamer.generateRename(test.testName, testNode) + changed := testRenamer.output[test.testName] + + if e, a := test.expectedLabel, changed; e != a { + t.Error(a) + } + testRenamer = newRenamerFromGenerated(map[string]string{test.testName: test.expectedLabel}) + testRenamer.updateNodeText(test.testName, testNode) + + if e, a := test.expectedText, testNode.Text(); e != a { + t.Logf(e) + t.Error(a) + } + }) + } +} diff --git a/openshift-hack/e2e/include.go b/openshift-hack/e2e/include.go new file mode 100644 index 0000000000000..48efbca4a3e38 --- /dev/null +++ b/openshift-hack/e2e/include.go @@ -0,0 +1,41 @@ +package e2e + +// This file should import all the packages defining k8s e2e tests that are +// relevant to openshift. It should match the imports from +// k8s.io/kubernetes/test/e2e/e2e_test.go. 
It is intended to affect: +// +// - what is included in the k8s-e2e.test binary built from this package +// - the annotations generated by the annotate package + +import ( + // define and freeze constants + _ "k8s.io/kubernetes/test/e2e/feature" + _ "k8s.io/kubernetes/test/e2e/nodefeature" + + // test sources + _ "k8s.io/kubernetes/test/e2e/apimachinery" + _ "k8s.io/kubernetes/test/e2e/apps" + _ "k8s.io/kubernetes/test/e2e/architecture" + _ "k8s.io/kubernetes/test/e2e/auth" + _ "k8s.io/kubernetes/test/e2e/autoscaling" + _ "k8s.io/kubernetes/test/e2e/cloud" + _ "k8s.io/kubernetes/test/e2e/common" + _ "k8s.io/kubernetes/test/e2e/dra" + _ "k8s.io/kubernetes/test/e2e/instrumentation" + _ "k8s.io/kubernetes/test/e2e/kubectl" + _ "k8s.io/kubernetes/test/e2e/lifecycle" + _ "k8s.io/kubernetes/test/e2e/lifecycle/bootstrap" + _ "k8s.io/kubernetes/test/e2e/network" + _ "k8s.io/kubernetes/test/e2e/node" + _ "k8s.io/kubernetes/test/e2e/scheduling" + _ "k8s.io/kubernetes/test/e2e/storage" + _ "k8s.io/kubernetes/test/e2e/storage/csimock" + _ "k8s.io/kubernetes/test/e2e/storage/external" + _ "k8s.io/kubernetes/test/e2e/windows" + + // reconfigure framework + _ "k8s.io/kubernetes/test/e2e/framework/debug/init" + _ "k8s.io/kubernetes/test/e2e/framework/metrics/init" + _ "k8s.io/kubernetes/test/e2e/framework/node/init" + _ "k8s.io/kubernetes/test/utils/format" +) diff --git a/openshift-hack/e2e/kube_e2e_test.go b/openshift-hack/e2e/kube_e2e_test.go new file mode 100644 index 0000000000000..19414493f57c5 --- /dev/null +++ b/openshift-hack/e2e/kube_e2e_test.go @@ -0,0 +1,110 @@ +package e2e + +//go:generate go run -mod vendor ./annotate/cmd -- ./annotate/generated/zz_generated.annotations.go + +// This file duplicates most of test/e2e/e2e_test.go but limits the included +// tests (via include.go) to tests that are relevant to openshift. + +import ( + "context" + "flag" + "fmt" + "math/rand" + "os" + "testing" + "time" + + "gopkg.in/yaml.v2" + + // Never, ever remove the line with "/ginkgo". Without it, + // the ginkgo test runner will not detect that this + // directory contains a Ginkgo test suite. + // See https://github.com/kubernetes/kubernetes/issues/74827 + // "github.com/onsi/ginkgo/v2" + + corev1 "k8s.io/api/core/v1" + kclientset "k8s.io/client-go/kubernetes" + "k8s.io/component-base/version" + conformancetestdata "k8s.io/kubernetes/test/conformance/testdata" + "k8s.io/kubernetes/test/e2e" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/testfiles" + e2etestingmanifests "k8s.io/kubernetes/test/e2e/testing-manifests" + testfixtures "k8s.io/kubernetes/test/fixtures" + "k8s.io/kubernetes/test/utils/image" + + // Ensure test annotation + _ "k8s.io/kubernetes/openshift-hack/e2e/annotate/generated" +) + +func TestMain(m *testing.M) { + var versionFlag bool + flag.CommandLine.BoolVar(&versionFlag, "version", false, "Displays version information.") + + // Register test flags, then parse flags. 
+ e2e.HandleFlags() + + if framework.TestContext.ListImages { + for _, v := range image.GetImageConfigs() { + fmt.Println(v.GetE2EImage()) + } + os.Exit(0) + } + if versionFlag { + fmt.Printf("%s\n", version.Get()) + os.Exit(0) + } + + // Enable embedded FS file lookup as fallback + testfiles.AddFileSource(e2etestingmanifests.GetE2ETestingManifestsFS()) + testfiles.AddFileSource(testfixtures.GetTestFixturesFS()) + testfiles.AddFileSource(conformancetestdata.GetConformanceTestdataFS()) + + if framework.TestContext.ListConformanceTests { + var tests []struct { + Testname string `yaml:"testname"` + Codename string `yaml:"codename"` + Description string `yaml:"description"` + Release string `yaml:"release"` + File string `yaml:"file"` + } + + data, err := testfiles.Read("test/conformance/testdata/conformance.yaml") + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := yaml.Unmarshal(data, &tests); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := yaml.NewEncoder(os.Stdout).Encode(tests); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + os.Exit(0) + } + + // Ensure the test namespaces have disabled SCCs and label syncer. + framework.TestContext.CreateTestingNS = func(ctx context.Context, baseName string, c kclientset.Interface, labels map[string]string) (*corev1.Namespace, error) { + return CreateTestingNS(ctx, baseName, c, labels, true) + } + + framework.AfterReadingAllFlags(&framework.TestContext) + + // TODO: Deprecating repo-root over time... instead just use gobindata_util.go , see #23987. + // Right now it is still needed, for example by + // test/e2e/framework/ingress/ingress_utils.go + // for providing the optional secret.yaml file and by + // test/e2e/framework/util.go for cluster/log-dump. 
+ if framework.TestContext.RepoRoot != "" { + testfiles.AddFileSource(testfiles.RootFileSource{Root: framework.TestContext.RepoRoot}) + } + + rand.Seed(time.Now().UnixNano()) + os.Exit(m.Run()) +} + +func TestE2E(t *testing.T) { + e2e.RunE2ETests(t) +} diff --git a/openshift-hack/e2e/namespace.go b/openshift-hack/e2e/namespace.go new file mode 100644 index 0000000000000..061e37270072e --- /dev/null +++ b/openshift-hack/e2e/namespace.go @@ -0,0 +1,132 @@ +package e2e + +import ( + "context" + "fmt" + "runtime/debug" + "strings" + + "github.com/onsi/ginkgo/v2" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + kclientset "k8s.io/client-go/kubernetes" + rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1" + "k8s.io/client-go/util/retry" + "k8s.io/kubernetes/test/e2e/framework" + + projectv1 "github.com/openshift/api/project/v1" +) + +// CreateTestingNS ensures that kubernetes e2e tests have their service accounts in the privileged and anyuid SCCs +func CreateTestingNS(ctx context.Context, baseName string, c kclientset.Interface, labels map[string]string, isKubeNamespace bool) (*corev1.Namespace, error) { + if !strings.HasPrefix(baseName, "e2e-") { + baseName = "e2e-" + baseName + } + + if labels == nil { + labels = map[string]string{} + } + // turn off the OpenShift label syncer so that it does not attempt to sync + // the PodSecurity admission labels + labels["security.openshift.io/scc.podSecurityLabelSync"] = "false" + + if isKubeNamespace { + labels["security.openshift.io/disable-securitycontextconstraints"] = "true" + } + + ns, err := framework.CreateTestingNS(ctx, baseName, c, labels) + if err != nil { + return ns, err + } + + if !isKubeNamespace { + return ns, err + } + + // Add anyuid and privileged permissions for upstream tests + clientConfig, err := framework.LoadConfig() + if err != nil { + return ns, err + } + + rbacClient, err := rbacv1client.NewForConfig(clientConfig) + if err != nil { + return ns, err + } + framework.Logf("About to run a Kube e2e test, ensuring namespace/%s is privileged", ns.Name) + // add the "privileged" scc to ensure pods that explicitly + // request extra capabilities are not rejected + addRoleToE2EServiceAccounts(ctx, rbacClient, []corev1.Namespace{*ns}, "system:openshift:scc:privileged") + // add the "anyuid" scc to ensure pods that don't specify a + // uid don't get forced into a range (mimics upstream + // behavior) + addRoleToE2EServiceAccounts(ctx, rbacClient, []corev1.Namespace{*ns}, "system:openshift:scc:anyuid") + // add the "hostmount-anyuid" scc to ensure pods using hostPath + // can execute tests + addRoleToE2EServiceAccounts(ctx, rbacClient, []corev1.Namespace{*ns}, "system:openshift:scc:hostmount-anyuid") + + // The intra-pod test requires that the service account have + // permission to retrieve service endpoints. + addRoleToE2EServiceAccounts(ctx, rbacClient, []corev1.Namespace{*ns}, "view") + + // in practice too many kube tests ignore scheduling constraints + allowAllNodeScheduling(ctx, c, ns.Name) + + return ns, err +} + +var longRetry = wait.Backoff{Steps: 100} + +func fatalErr(msg interface{}) { + // the path that leads to this being called isn't always clear... 
+ fmt.Fprintln(ginkgo.GinkgoWriter, string(debug.Stack())) + framework.Failf("%v", msg) +} + +func addRoleToE2EServiceAccounts(ctx context.Context, rbacClient rbacv1client.RbacV1Interface, namespaces []corev1.Namespace, roleName string) { + err := retry.RetryOnConflict(longRetry, func() error { + for _, ns := range namespaces { + if ns.Status.Phase != corev1.NamespaceTerminating { + _, err := rbacClient.RoleBindings(ns.Name).Create(ctx, &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{GenerateName: "default-" + roleName, Namespace: ns.Name}, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: roleName, + }, + Subjects: []rbacv1.Subject{ + {Name: "default", Namespace: ns.Name, Kind: rbacv1.ServiceAccountKind}, + }, + }, metav1.CreateOptions{}) + if err != nil { + framework.Logf("Warning: Failed to add role to e2e service account: %v", err) + } + } + } + return nil + }) + if err != nil { + fatalErr(err) + } +} + +// allowAllNodeScheduling sets the annotation on namespace that allows all nodes to be scheduled onto. +func allowAllNodeScheduling(ctx context.Context, c kclientset.Interface, namespace string) { + err := retry.RetryOnConflict(longRetry, func() error { + ns, err := c.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}) + if err != nil { + return err + } + if ns.Annotations == nil { + ns.Annotations = make(map[string]string) + } + ns.Annotations[projectv1.ProjectNodeSelector] = "" + _, err = c.CoreV1().Namespaces().Update(ctx, ns, metav1.UpdateOptions{}) + return err + }) + if err != nil { + fatalErr(err) + } +} diff --git a/openshift-hack/images/OWNERS b/openshift-hack/images/OWNERS new file mode 100644 index 0000000000000..7b196b0fb7003 --- /dev/null +++ b/openshift-hack/images/OWNERS @@ -0,0 +1,11 @@ +reviewers: + - smarterclayton + - giuseppe + - JacobTanenbaum + - pweil- + - pecameron + - sdodson +approvers: + - smarterclayton + - pweil- + - sdodson diff --git a/openshift-hack/images/hyperkube/Dockerfile.rhel b/openshift-hack/images/hyperkube/Dockerfile.rhel new file mode 100644 index 0000000000000..fabdce0b9c026 --- /dev/null +++ b/openshift-hack/images/hyperkube/Dockerfile.rhel @@ -0,0 +1,17 @@ +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder +WORKDIR /go/src/k8s.io/kubernetes +COPY . . +RUN make WHAT='cmd/kube-apiserver cmd/kube-controller-manager cmd/kube-scheduler cmd/kubelet cmd/watch-termination openshift-hack/cmd/k8s-tests openshift-hack/cmd/k8s-tests-ext' && \ + mkdir -p /tmp/build && \ + cp openshift-hack/images/hyperkube/hyperkube openshift-hack/images/hyperkube/kubensenter /tmp/build && \ + cp /go/src/k8s.io/kubernetes/_output/local/bin/linux/$(go env GOARCH)/{kube-apiserver,kube-controller-manager,kube-scheduler,kubelet,watch-termination,k8s-tests,k8s-tests-ext} \ + /tmp/build && \ + gzip /tmp/build/k8s-tests-ext + +FROM registry.ci.openshift.org/ocp/4.19:base-rhel9 +RUN yum install -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False iproute && yum clean all +COPY --from=builder /tmp/build/* /usr/bin/ +LABEL io.k8s.display-name="OpenShift Kubernetes Server Commands" \ + io.k8s.description="OpenShift is a platform for developing, building, and deploying containerized applications." 
\
+      io.openshift.tags="openshift,hyperkube" \
+      io.openshift.build.versions="kubernetes=1.32.1"
\ No newline at end of file
diff --git a/openshift-hack/images/hyperkube/OWNERS b/openshift-hack/images/hyperkube/OWNERS
new file mode 100644
index 0000000000000..e814678493032
--- /dev/null
+++ b/openshift-hack/images/hyperkube/OWNERS
@@ -0,0 +1,5 @@
+reviewers:
+  - smarterclayton
+  - sdodson
+approvers:
+  - smarterclayton
diff --git a/openshift-hack/images/hyperkube/hyperkube b/openshift-hack/images/hyperkube/hyperkube
new file mode 100755
index 0000000000000..cfed9cd737c02
--- /dev/null
+++ b/openshift-hack/images/hyperkube/hyperkube
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+BINS=(
+    kube-apiserver
+    kube-controller-manager
+    kube-scheduler
+    kubelet
+)
+
+function array_contains() {
+    local search="$1"
+    local element
+    shift
+    for element; do
+        if [[ "${element}" == "${search}" ]]; then
+            return 0
+        fi
+    done
+    return 1
+}
+
+function print_usage() {
+    cat <<EOF
+usage: hyperkube <server> [flags]
+
+Where server is one of:
+    ${BINS[*]}
+EOF
+}
+
+function main() {
+    if [[ $# -eq 0 ]]; then
+        print_usage
+        exit 1
+    fi
+
+    local command="$1"
+    shift
+
+    if ! array_contains "${command}" "${BINS[@]}"; then
+        print_usage
+        exit 1
+    fi
+
+    if ! which "${command}" &>/dev/null; then
+        echo "${command}: command not found"
+        exit 1
+    fi
+
+    exec "${command}" "${@}"
+}
+
+main "${@}"
\ No newline at end of file
diff --git a/openshift-hack/images/hyperkube/kubensenter b/openshift-hack/images/hyperkube/kubensenter
new file mode 100644
index 0000000000000..56ab26ee9e952
--- /dev/null
+++ b/openshift-hack/images/hyperkube/kubensenter
@@ -0,0 +1,117 @@
+#!/bin/bash
+
+# shellcheck disable=SC2016
+usage() {
+    echo "A command line wrapper to run commands or shells inside the"
+    echo "kubens.service mount namespace."
+    echo
+    echo "Usage:"
+    echo "  $(basename "$0") [--verbose|--quiet] [command ...]"
+    echo
+    echo 'Autodetect whether the `kubens.service` has pinned a mount namespace in a'
+    echo 'well-known location, and if so, join it by passing it and the user-specified'
+    echo 'command to nsenter(1). If `kubens.service` has not set up the mount namespace,'
+    echo 'the user-specified command is still executed by nsenter(1) but no namespace is'
+    echo 'entered.'
+    echo
+    echo 'If $KUBENSMNT is set in the environment, skip autodetection and attempt to join'
+    echo 'that mount namespace by passing it and the user-specified command to'
+    echo 'nsenter(1). If the mount namespace is missing or invalid, the command will'
+    echo 'fail.'
+    echo
+    echo 'In either case, if no command is given on the command line, nsenter(1) will'
+    echo 'spawn a new interactive shell which will be inside the mount namespace if'
+    echo 'detected.'
+    exit 1
+}
+
+LOGLEVEL=${KUBENSENTER_LOG:-1}
+_log() {
+    local level=$1; shift
+    if [[ $level -le $LOGLEVEL ]]; then
+        echo "kubensenter: $*" >&2
+    fi
+}
+
+info() {
+    _log 1 "$*"
+}
+
+debug() {
+    _log 2 "$*"
+}
+
+# Returns 0 if the argument given is a mount namespace
+ismnt() {
+    local nsfs
+    nsfs=$(findmnt -o SOURCE -n -t nsfs "$1")
+    [[ $nsfs =~ ^nsfs\[mnt:\[ ]]
+}
+
+# Set KUBENSMNT to the default location that kubens.service uses if KUBENSMNT isn't already set.
+DEFAULT_KUBENSMNT=${DEFAULT_KUBENSMNT:-"/run/kubens/mnt"}
+autodetect() {
+    local default=$DEFAULT_KUBENSMNT
+    if [[ -n $KUBENSMNT ]]; then
+        debug "Autodetect: \$KUBENSMNT already set"
+        return 0
+    fi
+    if [[ ! -e $default ]]; then
+        debug "Autodetect: No mount namespace found at $default"
+        return 1
+    fi
+    if ! ismnt "$default"; then
ismnt "$default"; then + info "Autodetect: Stale or mismatched namespace at $default" + return 1 + fi + KUBENSMNT=$default + info "Autodetect: kubens.service namespace found at $KUBENSMNT" + return 0 +} + +# Wrap the user-given command in nsenter, joining the mount namespace set in $KUBENSMNT if set +kubensenter() { + local nsarg + if [[ -n $KUBENSMNT ]]; then + debug "Joining mount namespace in $KUBENSMNT" + nsarg=$(printf -- "--mount=%q" "$KUBENSMNT") + else + debug "KUBENSMNT not set; running normally" + # Intentional fallthrough to run nsenter anyway: + # - If $@ is non-empty, nsenter effectively runs `exec "$@"` + # - If $@ is empty, nsenter spawns a new shell + fi + # Using 'exec' is important here; Without it, systemd may have trouble + # seeing the underlying process especially if it's using 'Type=notify' + # semantics. + # shellcheck disable=SC2086 + # ^- Intentionally collapse $nsarg if not set (and we've already shell-quoted it above if we did set it) + exec nsenter $nsarg "$@" +} + +main() { + while [[ -n $1 ]]; do + case "$1" in + -h | --help) + usage + ;; + -v | --verbose) + shift + ((LOGLEVEL++)) + ;; + -q | --quiet) + shift + ((LOGLEVEL--)) + ;; + *) + break + ;; + esac + done + + autodetect + kubensenter "$@" +} + +# bash modulino +[[ "${BASH_SOURCE[0]}" == "$0" ]] && main "$@" diff --git a/openshift-hack/images/installer-kube-apiserver-artifacts/Dockerfile.rhel b/openshift-hack/images/installer-kube-apiserver-artifacts/Dockerfile.rhel new file mode 100644 index 0000000000000..fb57a6042fe64 --- /dev/null +++ b/openshift-hack/images/installer-kube-apiserver-artifacts/Dockerfile.rhel @@ -0,0 +1,55 @@ +# This Dockerfile builds an image containing Mac and Linux/AMD64 versions of +# the kube-apiserver layered on top of the cluster-native Linux installer image. +# The resulting image is used to build the openshift-install binary. + +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS macbuilder +ARG TAGS="" +WORKDIR /go/src/k8s.io/kubernetes +COPY . . +ENV KUBE_BUILD_PLATFORMS=darwin/amd64 +ENV KUBE_STATIC_OVERRIDES=kube-apiserver +RUN make WHAT='cmd/kube-apiserver' + +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS macarmbuilder +ARG TAGS="" +WORKDIR /go/src/k8s.io/kubernetes +COPY . . +ENV KUBE_BUILD_PLATFORMS=darwin/arm64 +ENV KUBE_STATIC_OVERRIDES=kube-apiserver +RUN make WHAT='cmd/kube-apiserver' + +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS linuxbuilder +ARG TAGS="" +WORKDIR /go/src/k8s.io/kubernetes +COPY . . +ENV GO_COMPLIANCE_EXCLUDE=".*" +ENV KUBE_BUILD_PLATFORMS=linux/amd64 +ENV KUBE_STATIC_OVERRIDES=kube-apiserver +RUN make WHAT='cmd/kube-apiserver' + +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS linuxarmbuilder +ARG TAGS="" +WORKDIR /go/src/k8s.io/kubernetes +COPY . . +ENV GO_COMPLIANCE_EXCLUDE=".*" +ENV KUBE_BUILD_PLATFORMS=linux/arm64 +ENV KUBE_STATIC_OVERRIDES=kube-apiserver +RUN make WHAT='cmd/kube-apiserver' + +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder +ARG TAGS="" +WORKDIR /go/src/k8s.io/kubernetes +COPY . . 
+ENV GO_COMPLIANCE_EXCLUDE=".*"
+ENV KUBE_STATIC_OVERRIDES=kube-apiserver
+RUN make WHAT='cmd/kube-apiserver'
+
+FROM registry.ci.openshift.org/ocp/4.19:base-rhel9
+COPY --from=macbuilder /go/src/k8s.io/kubernetes/_output/local/bin/darwin/amd64/kube-apiserver /usr/share/openshift/darwin/amd64/kube-apiserver
+COPY --from=macarmbuilder /go/src/k8s.io/kubernetes/_output/local/bin/darwin/arm64/kube-apiserver /usr/share/openshift/darwin/arm64/kube-apiserver
+COPY --from=linuxbuilder /go/src/k8s.io/kubernetes/_output/local/bin/linux/amd64/kube-apiserver /usr/share/openshift/linux/amd64/kube-apiserver
+COPY --from=linuxarmbuilder /go/src/k8s.io/kubernetes/_output/local/bin/linux/arm64/kube-apiserver /usr/share/openshift/linux/arm64/kube-apiserver
+COPY --from=builder /go/src/k8s.io/kubernetes/_output/local/bin/ /usr/share/openshift/
+
+# This image is not an operator, it is only used as part of the build pipeline.
+LABEL io.openshift.release.operator=false
diff --git a/openshift-hack/images/installer-kube-apiserver-artifacts/OWNERS b/openshift-hack/images/installer-kube-apiserver-artifacts/OWNERS
new file mode 100644
index 0000000000000..f382794577f99
--- /dev/null
+++ b/openshift-hack/images/installer-kube-apiserver-artifacts/OWNERS
@@ -0,0 +1,10 @@
+reviewers:
+  - JoelSpeed
+  - vincepri
+  - patrickdillon
+  - r4f4
+approvers:
+  - JoelSpeed
+  - vincepri
+  - patrickdillon
+  - r4f4
diff --git a/openshift-hack/images/kube-proxy/Dockerfile.rhel b/openshift-hack/images/kube-proxy/Dockerfile.rhel
new file mode 100644
index 0000000000000..619ce5942b8d9
--- /dev/null
+++ b/openshift-hack/images/kube-proxy/Dockerfile.rhel
@@ -0,0 +1,15 @@
+FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder
+WORKDIR /go/src/k8s.io/kubernetes
+COPY . .
+RUN make WHAT='cmd/kube-proxy' && \
+    mkdir -p /tmp/build && \
+    cp /go/src/k8s.io/kubernetes/_output/local/bin/linux/$(go env GOARCH)/kube-proxy /tmp/build
+
+FROM registry.ci.openshift.org/ocp/4.19:base-rhel9
+RUN INSTALL_PKGS="conntrack-tools iptables nftables" && \
+    yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \
+    yum clean all && rm -rf /var/cache/*
+COPY --from=builder /tmp/build/* /usr/bin/
+LABEL io.k8s.display-name="Kubernetes kube-proxy" \
+      io.k8s.description="Provides kube-proxy for external CNI plugins" \
+      io.openshift.tags="openshift,kube-proxy"
diff --git a/openshift-hack/images/kube-proxy/OWNERS b/openshift-hack/images/kube-proxy/OWNERS
new file mode 100644
index 0000000000000..df1fc6730a20b
--- /dev/null
+++ b/openshift-hack/images/kube-proxy/OWNERS
@@ -0,0 +1,19 @@
+reviewers:
+  - abhat
+  - danwinship
+  - dougbtv
+  - JacobTanenbaum
+  - jcaamano
+  - kyrtapz
+  - trozet
+  - tssurya
+approvers:
+  - abhat
+  - danwinship
+  - dougbtv
+  - fepan
+  - JacobTanenbaum
+  - jcaamano
+  - knobunc
+  - kyrtapz
+  - trozet
diff --git a/openshift-hack/images/kube-proxy/test-kube-proxy.sh b/openshift-hack/images/kube-proxy/test-kube-proxy.sh
new file mode 100755
index 0000000000000..514b52eff28d9
--- /dev/null
+++ b/openshift-hack/images/kube-proxy/test-kube-proxy.sh
@@ -0,0 +1,244 @@
+#!/bin/bash
+
+set -o nounset
+set -o errexit
+set -o pipefail
+
+# This script tests the kube-proxy image without actually using it as
+# part of the infrastructure of a cluster. It is intended to be copied
+# to the kubernetes-tests image for use in CI and should have no
+# dependencies beyond oc and basic shell stuff.
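+#
+# Illustrative invocation (the pullspec below is a placeholder, not a real
+# image; `oc` must already be logged in to the cluster under test):
+#
+#   KUBE_PROXY_IMAGE=registry.example.com/ocp/kube-proxy:latest ./test-kube-proxy.sh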
+
+# There is no good way to "properly" test the kube-proxy image in
+# OpenShift CI, because it is only used as a dependency of third-party
+# software (e.g. Calico); no fully-RH-supported configuration uses it.
+#
+# However, since we don't apply any kube-proxy-specific patches to our
+# tree, we can assume that it *mostly* works, since we are building
+# from sources that passed upstream testing. This script is just to
+# confirm that our build is not somehow completely broken (e.g.
+# immediate segfault due to a bad build environment).
+
+if [[ -z "${KUBE_PROXY_IMAGE:-}" ]]; then
+    echo "KUBE_PROXY_IMAGE not set" 1>&2
+    exit 1
+fi
+
+TMPDIR=$(mktemp --tmpdir -d kube-proxy.XXXXXX)
+function cleanup() {
+    oc delete namespace kube-proxy-test || true
+    oc delete clusterrole kube-proxy-test || true
+    oc delete clusterrolebinding kube-proxy-test || true
+    rm -rf "${TMPDIR}"
+}
+trap "cleanup" EXIT
+
+function indent() {
+    sed -e 's/^/  /' "$@"
+    echo ""
+}
+
+# Decide what kube-proxy mode to use.
+# (jsonpath expression copied from types_cluster_version.go)
+OCP_VERSION=$(oc get clusterversion version -o jsonpath='{.status.history[?(@.state=="Completed")].version}')
+case "${OCP_VERSION}" in
+    4.17.*|4.18.*)
+        # 4.17 and 4.18 always use RHEL 9 (and nftables mode was still
+        # alpha in 4.17), so use iptables mode
+        PROXY_MODE="iptables"
+        ;;
+    *)
+        # 4.19 and later may use RHEL 10, so use nftables mode
+        PROXY_MODE="nftables"
+        ;;
+esac
+
+echo "Setting up Namespace and RBAC"
+oc create -f - <<EOF
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: kube-proxy-test
+  labels:
+    security.openshift.io/scc.podSecurityLabelSync: "false"
+    pod-security.kubernetes.io/enforce: privileged
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: kube-proxy-test
+rules:
+- apiGroups: [""]
+  resources: ["nodes", "services", "endpoints"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: ["discovery.k8s.io"]
+  resources: ["endpointslices"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: ["", "events.k8s.io"]
+  resources: ["events"]
+  verbs: ["create", "patch", "update"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: kube-proxy-test
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: kube-proxy-test
+subjects:
+- kind: ServiceAccount
+  name: default
+  namespace: kube-proxy-test
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: kube-proxy
+  namespace: kube-proxy-test
+spec:
+  hostNetwork: true
+  containers:
+  - name: kube-proxy
+    image: ${KUBE_PROXY_IMAGE}
+    command:
+    - /usr/bin/kube-proxy
+    - --proxy-mode=${PROXY_MODE}
+    securityContext:
+      privileged: true
+EOF
+
+oc wait --for=condition=Ready -n kube-proxy-test pod/kube-proxy --timeout=120s
+
+# Check whether kube-proxy has programmed its initial rule set, as
+# reported by its sync metrics (served on the default metrics port).
+function kube_proxy_synced() {
+    oc exec -n kube-proxy-test kube-proxy -- \
+        curl -s http://127.0.0.1:10249/metrics > "${TMPDIR}/metrics.txt"
+    grep -q '^kubeproxy_sync_proxy_rules_duration_seconds_count [^0]' "${TMPDIR}/metrics.txt"
+}
+synced=false
+for count in $(seq 1 10); do
+    date
+    if kube_proxy_synced; then
+        synced=true
+        break
+    fi
+    sleep 5
+done
+date
+if [[ "${synced}" != true ]]; then
+    echo "kube-proxy failed to sync to ${PROXY_MODE}:"
+    oc logs -n kube-proxy-test kube-proxy |& indent
+
+    echo "last-seen metrics:"
+    indent "${TMPDIR}/metrics.txt"
+
+    exit 1
+fi
+
+# Dump the ruleset; since RHEL9 uses iptables-nft, kube-proxy's rules
+# will show up in the nft ruleset regardless of whether kube-proxy is
+# using iptables or nftables.
+echo "Dumping rules"
+oc exec -n kube-proxy-test kube-proxy -- nft list ruleset >& "${TMPDIR}/nft.out"
+
+# We don't want to hardcode any assumptions about what kube-proxy's
+# rules look like, but it necessarily must be the case that every
+# clusterIP appears somewhere in the output. (We could look for
+# endpoint IPs too, but that's more racy if there's any chance the
+# cluster could be changing.)
+exitcode=0
+for service in kubernetes.default dns-default.openshift-dns router-default.openshift-ingress; do
+    name="${service%.*}"
+    namespace="${service#*.}"
+    clusterIP="$(oc get service -n ${namespace} ${name} -o jsonpath='{.spec.clusterIP}')"
+    echo "Looking for ${service} cluster IP (${clusterIP}) in ruleset"
+    for ip in ${clusterIP}; do
+        if ! grep --quiet --fixed-strings " ${ip} " "${TMPDIR}/nft.out"; then
+            echo "Did not find IP ${ip} (from service ${name} in namespace ${namespace}) in ruleset" 1>&2
+            exitcode=1
+        fi
+    done
+done
+echo ""
+
+if [[ "${exitcode}" == 1 ]]; then
+    echo "Ruleset was:"
+    indent "${TMPDIR}/nft.out"
+
+    echo "kube-proxy logs:"
+    oc logs -n kube-proxy-test kube-proxy |& indent
+fi
+
+exit "${exitcode}"
diff --git a/openshift-hack/images/tests/Dockerfile.rhel b/openshift-hack/images/tests/Dockerfile.rhel
new file mode 100644
index 0000000000000..ff0b2fa6e1dba
--- /dev/null
+++ b/openshift-hack/images/tests/Dockerfile.rhel
@@ -0,0 +1,22 @@
+FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder
+WORKDIR /go/src/k8s.io/kubernetes
+COPY . .
+RUN make WHAT=openshift-hack/e2e/k8s-e2e.test; \
+    make WHAT=vendor/github.com/onsi/ginkgo/v2/ginkgo; \
+    mkdir -p /tmp/build; \
+    cp /go/src/k8s.io/kubernetes/_output/local/bin/linux/$(go env GOARCH)/k8s-e2e.test /tmp/build/; \
+    cp /go/src/k8s.io/kubernetes/_output/local/bin/linux/$(go env GOARCH)/ginkgo /tmp/build/; \
+    cp /go/src/k8s.io/kubernetes/openshift-hack/test-kubernetes-e2e.sh /tmp/build/; \
+    cp /go/src/k8s.io/kubernetes/openshift-hack/images/kube-proxy/test-kube-proxy.sh /tmp/build/
+
+FROM registry.ci.openshift.org/ocp/4.19:tools
+COPY --from=builder /tmp/build/k8s-e2e.test /usr/bin/
+COPY --from=builder /tmp/build/ginkgo /usr/bin/
+COPY --from=builder /tmp/build/test-kubernetes-e2e.sh /usr/bin/
+COPY --from=builder /tmp/build/test-kube-proxy.sh /usr/bin/
+RUN yum install --setopt=tsflags=nodocs -y git gzip util-linux && yum clean all && rm -rf /var/cache/yum/* && \
+    git config --system user.name test && \
+    git config --system user.email test@test.com && \
+    chmod g+w /etc/passwd
+LABEL io.k8s.display-name="Kubernetes End-to-End Tests" \
+      io.openshift.tags="k8s,tests,e2e"
diff --git a/openshift-hack/images/tests/OWNERS b/openshift-hack/images/tests/OWNERS
new file mode 100644
index 0000000000000..e814678493032
--- /dev/null
+++ b/openshift-hack/images/tests/OWNERS
@@ -0,0 +1,5 @@
+reviewers:
+  - smarterclayton
+  - sdodson
+approvers:
+  - smarterclayton
diff --git a/openshift-hack/kubensenter.env b/openshift-hack/kubensenter.env
new file mode 100644
index 0000000000000..c37c5bbab2868
--- /dev/null
+++ b/openshift-hack/kubensenter.env
@@ -0,0 +1,16 @@
+# Configure which version of kubensenter we need to synchronize
+
+# Define the github repo where we should fetch the kubensenter script
+REPO="github.com/containers/kubensmnt"
+
+# The specific commit or tag of the kubensenter script
+# Note: Should be an explicit tag or commit SHA - Setting to a branch name will cause unexpected verification failures in the future.
+COMMIT=v1.2.0 # (36e5652992df9a3d4abc3d8f02a33c2e364efda9)
+
+# The branch name or tag glob to resolve when 'update-kubensenter.sh --to-latest' is run:
+# - If this resolves to a branch, COMMIT will be set to the latest commit hash on that branch.
+# - If this resolves to a tag name, COMMIT will be set to that tag.
+# - May contain a glob expression such as "v1.1.*" that would match any of the following:
+#   v1.1.0 v1.1.3 v1.1.22-rc1
+#TARGET="main"
+TARGET="v1.2.*"
diff --git a/openshift-hack/lib/build/binaries.sh b/openshift-hack/lib/build/binaries.sh
new file mode 100644
index 0000000000000..e3c71254f37c1
--- /dev/null
+++ b/openshift-hack/lib/build/binaries.sh
@@ -0,0 +1,457 @@
+#!/usr/bin/env bash
+
+# This library holds utility functions for building
+# and placing Golang binaries for multiple arches.
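+#
+# A minimal usage sketch (the targets and platform list are examples, not
+# defaults; os::build::setup_env is invoked internally by build_binaries):
+#
+#   OS_BUILD_PLATFORMS=(linux/amd64 linux/arm64)
+#   os::build::build_binaries cmd/kube-apiserver cmd/kubelet
+#   os::build::place_bins cmd/kube-apiserver cmd/kubelet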
+
+# os::build::binaries_from_targets takes a list of build targets and returns
+# the full go package to be built for each
+function os::build::binaries_from_targets() {
+    local target
+    for target; do
+        if [[ -z "${target}" ]]; then
+            continue
+        fi
+        echo "${OS_GO_PACKAGE}/${target}"
+    done
+}
+readonly -f os::build::binaries_from_targets
+
+# Asks golang what it thinks the host platform is. The go tool chain does some
+# slightly different things when the target platform matches the host platform.
+function os::build::host_platform() {
+    echo "$(go env GOHOSTOS)/$(go env GOHOSTARCH)"
+}
+readonly -f os::build::host_platform
+
+# Create a user friendly version of host_platform for end users
+function os::build::host_platform_friendly() {
+    local platform=${1:-}
+    if [[ -z "${platform}" ]]; then
+        platform=$(os::build::host_platform)
+    fi
+    if [[ $platform == "windows/amd64" ]]; then
+        echo "windows"
+    elif [[ $platform == "darwin/amd64" ]]; then
+        echo "mac"
+    elif [[ $platform == "linux/386" ]]; then
+        echo "linux-32bit"
+    elif [[ $platform == "linux/amd64" ]]; then
+        echo "linux-64bit"
+    elif [[ $platform == "linux/ppc64le" ]]; then
+        echo "linux-powerpc64"
+    elif [[ $platform == "linux/arm64" ]]; then
+        echo "linux-arm64"
+    elif [[ $platform == "linux/s390x" ]]; then
+        echo "linux-s390"
+    else
+        echo "$(go env GOHOSTOS)-$(go env GOHOSTARCH)"
+    fi
+}
+readonly -f os::build::host_platform_friendly
+
+# This converts from platform/arch to PLATFORM_ARCH; the host platform is
+# used if no parameter is passed
+function os::build::platform_arch() {
+    local platform=${1:-}
+    if [[ -z "${platform}" ]]; then
+        platform=$(os::build::host_platform)
+    fi
+
+    echo "${platform}" | tr '[:lower:]/' '[:upper:]_'
+}
+readonly -f os::build::platform_arch
+
+# os::build::setup_env will check that the `go` command is available in
+# ${PATH}. If not running on Travis, it will also check that the Go version is
+# good enough for the Kubernetes build.
+#
+# Output Vars:
+#   export GOPATH - A modified GOPATH to our created tree along with extra
+#     stuff.
+#   export GOBIN - This is actively unset if already set as we want binaries
+#     placed in a predictable place.
+function os::build::setup_env() {
+    os::util::ensure::system_binary_exists 'go'
+
+    if [[ -z "$(which sha256sum)" ]]; then
+        sha256sum() {
+            return 0
+        }
+    fi
+
+    # Travis continuous build uses a head go release that doesn't report
+    # a version number, so we skip this check on Travis. It's unnecessary
+    # there anyway.
+    if [[ "${TRAVIS:-}" != "true" ]]; then
+        os::golang::verify_go_version
+    fi
+    # For any tools that expect this to be set (it is default in golang 1.6),
+    # force vendor experiment.
+    export GO15VENDOREXPERIMENT=1
+
+    unset GOBIN
+
+    # create a local GOPATH in _output
+    GOPATH="${OS_OUTPUT}/go"
+    OS_TARGET_BIN="${OS_OUTPUT}/go/bin"
+    local go_pkg_dir="${GOPATH}/src/${OS_GO_PACKAGE}"
+    local go_pkg_basedir
+    go_pkg_basedir="$(dirname "${go_pkg_dir}")"
+
+    mkdir -p "${go_pkg_basedir}"
+    rm -f "${go_pkg_dir}"
+
+    # TODO: This symlink should be relative.
+    ln -s "${OS_ROOT}" "${go_pkg_dir}"
+
+    # lots of tools "just don't work" unless we're in the GOPATH
+    cd "${go_pkg_dir}" || exit 1
+
+    # Append OS_EXTRA_GOPATH to the GOPATH if it is defined.
+    if [[ -n ${OS_EXTRA_GOPATH:-} ]]; then
+        GOPATH="${GOPATH}:${OS_EXTRA_GOPATH}"
+    fi
+
+    export GOPATH
+    export OS_TARGET_BIN
+}
+readonly -f os::build::setup_env
+
+# Build static binary targets.
+#
+# Input:
+#   $@ - targets and go flags. If no targets are set then all binaries targets
+#     are built.
+# OS_BUILD_PLATFORMS - Incoming variable of targets to build for. If unset +# then just the host architecture is built. +function os::build::build_static_binaries() { + CGO_ENABLED=0 os::build::build_binaries -installsuffix=cgo "$@" +} +readonly -f os::build::build_static_binaries + +# Build binary targets specified +# +# Input: +# $@ - targets and go flags. If no targets are set then all binaries targets +# are built. +# OS_BUILD_PLATFORMS - Incoming variable of targets to build for. If unset +# then just the host architecture is built. +function os::build::build_binaries() { + if [[ $# -eq 0 ]]; then + return + fi + local -a binaries=( "$@" ) + # Create a sub-shell so that we don't pollute the outer environment + ( os::build::internal::build_binaries "${binaries[@]+"${binaries[@]}"}" ) +} + +# Build binary targets specified. Should always be run in a sub-shell so we don't leak GOBIN +# +# Input: +# $@ - targets and go flags. If no targets are set then all binaries targets +# are built. +# OS_BUILD_PLATFORMS - Incoming variable of targets to build for. If unset +# then just the host architecture is built. +os::build::internal::build_binaries() { + # Check for `go` binary and set ${GOPATH}. + os::build::setup_env + + # Fetch the version. + local version_ldflags + version_ldflags=$(os::build::ldflags) + + local goflags + # Use eval to preserve embedded quoted strings. + eval "goflags=(${OS_GOFLAGS:-})" + gogcflags="${GOGCFLAGS:-}" + + local arg + for arg; do + if [[ "${arg}" == -* ]]; then + # Assume arguments starting with a dash are flags to pass to go. + goflags+=("${arg}") + fi + done + + os::build::export_targets "$@" + + if [[ ! "${targets[*]:+${targets[*]}}" || ! "${binaries[*]:+${binaries[*]}}" ]]; then + return 0 + fi + + local -a nonstatics=() + local -a tests=() + for binary in "${binaries[@]-}"; do + if [[ "${binary}" =~ ".test"$ ]]; then + tests+=("$binary") + else + nonstatics+=("$binary") + fi + done + + local pkgdir="${OS_OUTPUT_PKGDIR}" + if [[ "${CGO_ENABLED-}" == "0" ]]; then + pkgdir+="/static" + fi + + local host_platform + host_platform=$(os::build::host_platform) + local platform + for platform in "${platforms[@]+"${platforms[@]}"}"; do + echo "++ Building go targets for ${platform}:" "${targets[@]}" + mkdir -p "${OS_OUTPUT_BINPATH}/${platform}" + + # output directly to the desired location + if [[ "$platform" == "$host_platform" ]]; then + export GOBIN="${OS_OUTPUT_BINPATH}/${platform}" + else + unset GOBIN + fi + + local platform_gotags_envvar + platform_gotags_envvar=OS_GOFLAGS_TAGS_$(os::build::platform_arch "${platform}") + local platform_gotags_test_envvar + platform_gotags_test_envvar=OS_GOFLAGS_TAGS_TEST_$(os::build::platform_arch "${platform}") + + # work around https://github.com/golang/go/issues/11887 + local local_ldflags="${version_ldflags}" + if [[ "${platform}" == "darwin/amd64" ]]; then + local_ldflags+=" -s" + fi + + #Add Windows File Properties/Version Info and Icon Resource for oc.exe + if [[ "$platform" == "windows/amd64" ]]; then + os::build::generate_windows_versioninfo + fi + + if [[ ${#nonstatics[@]} -gt 0 ]]; then + GOOS=${platform%/*} GOARCH=${platform##*/} go install \ + -tags "${OS_GOFLAGS_TAGS-} ${!platform_gotags_envvar:-}" \ + -ldflags="${local_ldflags}" \ + "${goflags[@]:+${goflags[@]}}" \ + -gcflags "${gogcflags}" \ + "${nonstatics[@]}" + + # GOBIN is not supported on cross-compile in Go 1.5+ - move to the correct target + if [[ "$platform" != "$host_platform" ]]; then + local platform_src="/${platform//\//_}" + mv 
"${OS_TARGET_BIN}/${platform_src}/"* "${OS_OUTPUT_BINPATH}/${platform}/" + fi + fi + + if [[ "$platform" == "windows/amd64" ]]; then + os::build::clean_windows_versioninfo + fi + + for test in "${tests[@]:+${tests[@]}}"; do + local outfile + outfile="${OS_OUTPUT_BINPATH}/${platform}/$(basename "${test}")" + # disabling cgo allows use of delve + CGO_ENABLED="${OS_TEST_CGO_ENABLED:-}" GOOS=${platform%/*} GOARCH=${platform##*/} go test \ + -tags "${OS_GOFLAGS_TAGS-} ${!platform_gotags_test_envvar:-}" \ + -ldflags "${local_ldflags}" \ + -i -c -o "${outfile}" \ + "${goflags[@]:+${goflags[@]}}" \ + "$(dirname "${test}")" + done + done + + os::build::check_binaries +} +readonly -f os::build::build_binaries + + # Generates the set of target packages, binaries, and platforms to build for. +# Accepts binaries via $@, and platforms via OS_BUILD_PLATFORMS, or defaults to +# the current platform. +function os::build::export_targets() { + platforms=("${OS_BUILD_PLATFORMS[@]:+${OS_BUILD_PLATFORMS[@]}}") + + targets=() + local arg + for arg; do + if [[ "${arg}" != -* ]]; then + targets+=("${arg}") + fi + done + + binaries=($(os::build::binaries_from_targets "${targets[@]-}")) +} +readonly -f os::build::export_targets + +# This will take $@ from $GOPATH/bin and copy them to the appropriate +# place in ${OS_OUTPUT_BINDIR} +# +# If OS_RELEASE_ARCHIVE is set, tar archives prefixed with OS_RELEASE_ARCHIVE for +# each of OS_BUILD_PLATFORMS are created. +# +# Ideally this wouldn't be necessary and we could just set GOBIN to +# OS_OUTPUT_BINDIR but that won't work in the face of cross compilation. 'go +# install' will place binaries that match the host platform directly in $GOBIN +# while placing cross compiled binaries into `platform_arch` subdirs. This +# complicates pretty much everything else we do around packaging and such. +function os::build::place_bins() { + ( + local host_platform + host_platform=$(os::build::host_platform) + + if [[ "${OS_RELEASE_ARCHIVE-}" != "" ]]; then + os::build::version::get_vars + mkdir -p "${OS_OUTPUT_RELEASEPATH}" + fi + + os::build::export_targets "$@" + for platform in "${platforms[@]+"${platforms[@]}"}"; do + # The substitution on platform_src below will replace all slashes with + # underscores. It'll transform darwin/amd64 -> darwin_amd64. + local platform_src="/${platform//\//_}" + + # Skip this directory if the platform has no binaries. + if [[ ! -d "${OS_OUTPUT_BINPATH}/${platform}" ]]; then + continue + fi + + # Create an array of binaries to release. Append .exe variants if the platform is windows. + local -a binaries=() + for binary in "${targets[@]}"; do + binary=$(basename "$binary") + if [[ $platform == "windows/amd64" ]]; then + binaries+=("${binary}.exe") + else + binaries+=("${binary}") + fi + done + + # If no release archive was requested, we're done. + if [[ "${OS_RELEASE_ARCHIVE-}" == "" ]]; then + continue + fi + + # Create a temporary bin directory containing only the binaries marked for release. + local release_binpath + release_binpath=$(mktemp -d "openshift.release.${OS_RELEASE_ARCHIVE}.XXX") + for binary in "${binaries[@]}"; do + cp "${OS_OUTPUT_BINPATH}/${platform}/${binary}" "${release_binpath}/" + done + + # Create the release archive. 
+ platform="$( os::build::host_platform_friendly "${platform}" )" + if [[ ${OS_RELEASE_ARCHIVE} == "openshift-origin" ]]; then + for file in "${OS_BINARY_RELEASE_CLIENT_EXTRA[@]}"; do + cp "${file}" "${release_binpath}/" + done + if [[ $platform == "linux-64bit" ]]; then + OS_RELEASE_ARCHIVE="openshift-origin-server" os::build::archive::tar "${OS_BINARY_RELEASE_SERVER_LINUX[@]}" + elif [[ $platform == "linux-powerpc64" ]]; then + OS_RELEASE_ARCHIVE="openshift-origin-server" os::build::archive::tar "${OS_BINARY_RELEASE_SERVER_LINUX[@]}" + elif [[ $platform == "linux-arm64" ]]; then + OS_RELEASE_ARCHIVE="openshift-origin-server" os::build::archive::tar "${OS_BINARY_RELEASE_SERVER_LINUX[@]}" + elif [[ $platform == "linux-s390" ]]; then + OS_RELEASE_ARCHIVE="openshift-origin-server" os::build::archive::tar "${OS_BINARY_RELEASE_SERVER_LINUX[@]}" + else + echo "++ ERROR: No release type defined for $platform" + fi + else + if [[ $platform == "linux-64bit" || $platform == "linux-powerpc64" || $platform == "linux-arm64" || $platform == "linux-s390" ]]; then + os::build::archive::tar "./*" + else + echo "++ ERROR: No release type defined for $platform" + fi + fi + rm -rf "${release_binpath}" + done + ) +} +readonly -f os::build::place_bins + +# os::build::release_sha calculates a SHA256 checksum over the contents of the +# built release directory. +function os::build::release_sha() { + pushd "${OS_OUTPUT_RELEASEPATH}" &> /dev/null || exit 1 + find . -maxdepth 1 -type f | xargs sha256sum > CHECKSUM + popd &> /dev/null || exit 1 +} +readonly -f os::build::release_sha + +# os::build::make_openshift_binary_symlinks makes symlinks for the openshift +# binary in _output/local/bin/${platform} +function os::build::make_openshift_binary_symlinks() { + platform=$(os::build::host_platform) +} +readonly -f os::build::make_openshift_binary_symlinks + +# DEPRECATED: will be removed +function os::build::ldflag() { + local key=${1} + local val=${2} + + echo "-X ${key}=${val}" +} +readonly -f os::build::ldflag + +# os::build::require_clean_tree exits if the current Git tree is not clean. +function os::build::require_clean_tree() { + if ! git diff-index --quiet HEAD -- || test "$(git ls-files --exclude-standard --others | wc -l)" != 0; then + echo "You can't have any staged or dirty files in $(pwd) for this command." + echo "Either commit them or unstage them to continue." + exit 1 + fi +} +readonly -f os::build::require_clean_tree + +# os::build::commit_range takes one or two arguments - if the first argument is an +# integer, it is assumed to be a pull request and the local origin/pr/# branch is +# used to determine the common range with the second argument. If the first argument +# is not an integer, it is assumed to be a Git commit range and output directly. +function os::build::commit_range() { + local remote + remote="${UPSTREAM_REMOTE:-origin}" + if [[ "$1" =~ ^-?[0-9]+$ ]]; then + local target + target="$(git rev-parse "${remote}/pr/$1")" + if [[ $? -ne 0 ]]; then + echo "Branch does not exist, or you have not configured ${remote}/pr/* style branches from GitHub" 1>&2 + exit 1 + fi + + local base + base="$(git merge-base "${target}" "$2")" + if [[ $? -ne 0 ]]; then + echo "Branch has no common commits with $2" 1>&2 + exit 1 + fi + if [[ "${base}" == "${target}" ]]; then + + # DO NOT TRUST THIS CODE + merged="$(git rev-list --reverse "${target}".."$2" --ancestry-path | head -1)" + if [[ -z "${merged}" ]]; then + echo "Unable to find the commit that merged ${remote}/pr/$1" 1>&2 + exit 1 + fi + #if [[ $? 
-ne 0 ]]; then
+        #    echo "Unable to find the merge commit for $1: ${merged}" 1>&2
+        #    exit 1
+        #fi
+        echo "++ pr/$1 appears to have merged at ${merged}" 1>&2
+        leftparent="$(git rev-list --parents -n 1 "${merged}" | cut -f2 -d ' ')"
+        if [[ $? -ne 0 ]]; then
+            echo "Unable to find the left-parent for the merge of $1" 1>&2
+            exit 1
+        fi
+        base="$(git merge-base "${target}" "${leftparent}")"
+        if [[ $? -ne 0 ]]; then
+            echo "Unable to find the common commit between ${leftparent} and $1" 1>&2
+            exit 1
+        fi
+        echo "${base}..${target}"
+        exit 0
+        #echo "Branch has already been merged to upstream master, use explicit range instead" 1>&2
+        #exit 1
+    fi
+
+    echo "${base}...${target}"
+    exit 0
+  fi
+
+  echo "$1"
+}
+readonly -f os::build::commit_range
diff --git a/openshift-hack/lib/build/rpm.sh b/openshift-hack/lib/build/rpm.sh
new file mode 100644
index 0000000000000..275602de6f067
--- /dev/null
+++ b/openshift-hack/lib/build/rpm.sh
@@ -0,0 +1,95 @@
+#!/usr/bin/env bash
+
+# This library holds utilities for building RPMs from Origin.
+
+# os::build::rpm::get_nvra_vars determines the NEVRA of the RPMs
+# that would be built from the current git state.
+#
+# Globals:
+#  - OS_GIT_VERSION
+# Arguments:
+#  - None
+# Exports:
+#  - OS_RPM_VERSION
+#  - OS_RPM_RELEASE
+#  - OS_RPM_ARCHITECTURE
+#  - OS_RPM_GIT_VARS
+function os::build::rpm::get_nvra_vars() {
+    # the package name can be overwritten but is normally 'origin'
+    OS_RPM_ARCHITECTURE="$(uname -i)"
+
+    # we can extract the package version from the build version
+    os::build::version::get_vars
+    if [[ "${OS_GIT_VERSION}" =~ ^v([0-9](\.[0-9]+)*)(.*) ]]; then
+        OS_RPM_VERSION="${BASH_REMATCH[1]}"
+        metadata="${BASH_REMATCH[3]}"
+    else
+        os::log::fatal "Malformed \$OS_GIT_VERSION: ${OS_GIT_VERSION}"
+    fi
+
+    # we can generate the package release from the git version metadata:
+    # OS_GIT_VERSION will always have metadata, which either contains
+    # pre-release information _and_ build metadata, or only the latter.
+    # Build metadata may or may not contain the number of commits past
+    # the last tag. If no commit number exists, we are on a tag and use 0.
+    # ex.
+    #   -alpha.0+shasums-123-dirty
+    #   -alpha.0+shasums-123
+    #   -alpha.0+shasums-dirty
+    #   -alpha.0+shasums
+    #   +shasums-123-dirty
+    #   +shasums-123
+    #   +shasums-dirty
+    #   +shasums
+    if [[ "${metadata:0:1}" == "+" ]]; then
+        # we only have build metadata, but need to massage it so
+        # we can generate a valid RPM release from it
+        if [[ "${metadata}" =~ ^\+([a-z0-9]{7,40})(-([0-9]+))?(-dirty)?$ ]]; then
+            build_sha="${BASH_REMATCH[1]}"
+            build_num="${BASH_REMATCH[3]:-0}"
+        else
+            os::log::fatal "Malformed git version metadata: ${metadata}"
+        fi
+        OS_RPM_RELEASE="1.${build_num}.${build_sha}"
+    elif [[ "${metadata:0:1}" == "-" ]]; then
+        # we have both build metadata and pre-release info
+        if [[ "${metadata}" =~ ^-([^\+]+)\+([a-z0-9]{7,40})(-([0-9]+))?(-dirty)?$ ]]; then
+            pre_release="${BASH_REMATCH[1]}"
+            build_sha="${BASH_REMATCH[2]}"
+            build_num="${BASH_REMATCH[4]:-0}"
+        else
+            os::log::fatal "Malformed git version metadata: ${metadata}"
+        fi
+        OS_RPM_RELEASE="0.${pre_release}.${build_num}.${build_sha}"
+    else
+        os::log::fatal "Malformed git version metadata: ${metadata}"
+    fi
+
+    OS_RPM_GIT_VARS=$( os::build::version::save_vars | tr '\n' ' ' )
+
+    export OS_RPM_VERSION OS_RPM_RELEASE OS_RPM_ARCHITECTURE OS_RPM_GIT_VARS
+}
+
+
+# os::build::rpm::format_nvra formats the rpm NVRA vars generated by
+# os::build::rpm::get_nvra_vars and will generate them if necessary
+#
+# Globals:
+#  - OS_RPM_NAME
+#  - OS_RPM_VERSION
+#  - OS_RPM_RELEASE
+#  - OS_RPM_ARCHITECTURE
+# Arguments:
+#  None
+# Returns:
+#  None
+function os::build::rpm::format_nvra() {
+    if [[ -z "${OS_RPM_VERSION:-}" || -z "${OS_RPM_RELEASE:-}" ]]; then
+        os::build::rpm::get_nvra_vars
+    fi
+    if [[ -z "${OS_RPM_NAME-}" ]]; then
+        OS_RPM_SPECFILE="$( find "${OS_ROOT}" -name '*.spec' )"
+        OS_RPM_NAME="$( rpmspec -q --qf '%{name}\n' "${OS_RPM_SPECFILE}" | head -1 )"
+    fi
+
+    echo "${OS_RPM_NAME}-${OS_RPM_VERSION}-${OS_RPM_RELEASE}.${OS_RPM_ARCHITECTURE}"
+}
diff --git a/openshift-hack/lib/build/version.sh b/openshift-hack/lib/build/version.sh
new file mode 100644
index 0000000000000..ea52257486f35
--- /dev/null
+++ b/openshift-hack/lib/build/version.sh
@@ -0,0 +1,88 @@
+#!/usr/bin/env bash
+
+# This library holds utility functions for determining
+# product versions from Git repository state.
+
+# os::build::version::get_vars loads the standard version variables as
+# ENV vars
+function os::build::version::get_vars() {
+    if [[ -n "${OS_VERSION_FILE-}" ]]; then
+        if [[ -f "${OS_VERSION_FILE}" ]]; then
+            source "${OS_VERSION_FILE}"
+            return
+        fi
+        if [[ ! -d "${OS_ROOT}/.git" ]]; then
+            os::log::fatal "No version file at ${OS_VERSION_FILE}"
+        fi
+        os::log::warning "No version file at ${OS_VERSION_FILE}, falling back to git versions"
+    fi
+    os::build::version::git_vars
+}
+readonly -f os::build::version::get_vars
+
+# os::build::version::git_vars looks up the current Git vars if they have not been calculated.
+function os::build::version::git_vars() {
+    if [[ -n "${OS_GIT_VERSION-}" ]]; then
+        return 0
+    fi
+
+    local git=(git --work-tree "${OS_ROOT}")
+
+    if [[ -n ${OS_GIT_COMMIT-} ]] || OS_GIT_COMMIT=$("${git[@]}" rev-parse --short "HEAD^{commit}" 2>/dev/null); then
+        if [[ -z ${OS_GIT_TREE_STATE-} ]]; then
+            # Check if the tree is dirty; default to dirty
+            if git_status=$("${git[@]}" status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then
+                OS_GIT_TREE_STATE="clean"
+            else
+                OS_GIT_TREE_STATE="dirty"
+            fi
+        fi
+        # Derive the version from the Kubernetes version recorded in the
+        # hyperkube Dockerfile image label, rather than from git describe.
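+        # (Illustrative result, assuming commit abc1234 and the
+        # "kubernetes=1.32.1" label above: OS_GIT_VERSION="v1.32.1+abc1234",
+        # with "-dirty" appended when the tree has local modifications.)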
+ if [[ -n ${OS_GIT_VERSION-} ]] || OS_GIT_VERSION=$(sed -rn 's/.*io.openshift.build.versions="kubernetes=(1.[0-9]+.[0-9]+(-rc.[0-9])?)"/v\1/p' openshift-hack/images/hyperkube/Dockerfile.rhel); then + # combine GIT_COMMIT with GIT_VERSION which is being read from the above Dockerfile + OS_GIT_VERSION+="+${OS_GIT_COMMIT:0:7}" + # Try to match the "git describe" output to a regex to try to extract + # the "major" and "minor" versions and whether this is the exact tagged + # version or whether the tree is between two tagged versions. + if [[ "${OS_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)\.([0-9]+)(\.[0-9]+)*([-].*)?$ ]]; then + OS_GIT_MAJOR=${BASH_REMATCH[1]} + OS_GIT_MINOR=${BASH_REMATCH[2]} + OS_GIT_PATCH=${BASH_REMATCH[3]} + fi + + if [[ "${OS_GIT_TREE_STATE}" == "dirty" ]]; then + # git describe --dirty only considers changes to existing files, but + # that is problematic since new untracked .go files affect the build, + # so use our idea of "dirty" from git status instead. + OS_GIT_VERSION+="-dirty" + fi + fi + fi + +} +readonly -f os::build::version::git_vars + +# Saves the environment flags to $1 +function os::build::version::save_vars() { + # Set the kube vars to the os vars to ensure correct versioning + # when using rpmbuild. This is necessary to ensure the kube build + # tooling correctly sets the version of binaries when building + # from source. + cat <> "${JUNIT_REPORT_OUTPUT:-/dev/null}" ) + os::test::junit::declare_test_end + return "${return_code}" +} +readonly -f os::cmd::internal::expect_exit_code_run_grep + +# os::cmd::internal::init_tempdir initializes the temporary directory +function os::cmd::internal::init_tempdir() { + mkdir -p "${os_cmd_internal_tmpdir}" + rm -f "${os_cmd_internal_tmpdir}"/tmp_std{out,err}.log +} +readonly -f os::cmd::internal::init_tempdir + +# os::cmd::internal::describe_call determines the file:line of the latest function call made +# from outside of this file in the call stack, and the name of the function being called from +# that line, returning a string describing the call +function os::cmd::internal::describe_call() { + local cmd=$1 + local cmd_eval_func=$2 + local grep_args=${3:-} + local test_eval_func=${4:-} + + local caller_id + caller_id=$(os::cmd::internal::determine_caller) + local full_name="${caller_id}: executing '${cmd}'" + + local cmd_expectation + cmd_expectation=$(os::cmd::internal::describe_expectation "${cmd_eval_func}") + local full_name="${full_name} expecting ${cmd_expectation}" + + if [[ -n "${grep_args}" ]]; then + local text_expecting= + case "${test_eval_func}" in + "os::cmd::internal::success_func") + text_expecting="text" ;; + "os::cmd::internal::failure_func") + text_expecting="not text" ;; + esac + full_name="${full_name} and ${text_expecting} '${grep_args}'" + fi + + echo "${full_name}" +} +readonly -f os::cmd::internal::describe_call + +# os::cmd::internal::determine_caller determines the file relative to the OpenShift Origin root directory +# and line number of the function call to the outer os::cmd wrapper function +function os::cmd::internal::determine_caller() { + local call_depth= + local len_sources="${#BASH_SOURCE[@]}" + for (( i=0; i>"${os_cmd_internal_tmpout}" 2>>"${os_cmd_internal_tmperr}" || result=$? 
+ local result=${result:-0} # if we haven't set result yet, the command succeeded + + return "${result}" +} +readonly -f os::cmd::internal::run_collecting_output + +# os::cmd::internal::success_func determines if the input exit code denotes success +# this function returns 0 for false and 1 for true to be compatible with arithmetic tests +function os::cmd::internal::success_func() { + local exit_code=$1 + + # use a negated test to get output correct for (( )) + [[ "${exit_code}" -ne "0" ]] + return $? +} +readonly -f os::cmd::internal::success_func + +# os::cmd::internal::failure_func determines if the input exit code denotes failure +# this function returns 0 for false and 1 for true to be compatible with arithmetic tests +function os::cmd::internal::failure_func() { + local exit_code=$1 + + # use a negated test to get output correct for (( )) + [[ "${exit_code}" -eq "0" ]] + return $? +} +readonly -f os::cmd::internal::failure_func + +# os::cmd::internal::specific_code_func determines if the input exit code matches the given code +# this function returns 0 for false and 1 for true to be compatible with arithmetic tests +function os::cmd::internal::specific_code_func() { + local expected_code=$1 + local exit_code=$2 + + # use a negated test to get output correct for (( )) + [[ "${exit_code}" -ne "${expected_code}" ]] + return $? +} +readonly -f os::cmd::internal::specific_code_func + +# os::cmd::internal::get_results prints the stderr and stdout files +function os::cmd::internal::get_results() { + cat "${os_cmd_internal_tmpout}" "${os_cmd_internal_tmperr}" +} +readonly -f os::cmd::internal::get_results + +# os::cmd::internal::get_last_results prints the stderr and stdout from the last attempt +function os::cmd::internal::get_last_results() { + awk 'BEGIN { RS = "\x1e" } END { print $0 }' "${os_cmd_internal_tmpout}" + awk 'BEGIN { RS = "\x1e" } END { print $0 }' "${os_cmd_internal_tmperr}" +} +readonly -f os::cmd::internal::get_last_results + +# os::cmd::internal::mark_attempt marks the end of an attempt in the stdout and stderr log files +# this is used to make the try_until_* output more concise +function os::cmd::internal::mark_attempt() { + echo -e '\x1e' >> "${os_cmd_internal_tmpout}" + echo -e '\x1e' >> "${os_cmd_internal_tmperr}" +} +readonly -f os::cmd::internal::mark_attempt + +# os::cmd::internal::compress_output compresses an output file into timeline representation +function os::cmd::internal::compress_output() { + local logfile=$1 + + awk -f "${OS_ROOT}/hack/lib/compress.awk" "${logfile}" +} +readonly -f os::cmd::internal::compress_output + +# os::cmd::internal::print_results pretty-prints the stderr and stdout files. If attempt separators +# are present, this function returns a concise view of the stdout and stderr output files using a +# timeline format, where consecutive output lines that are the same are condensed into one line +# with a counter +function os::cmd::internal::print_results() { + if [[ -s "${os_cmd_internal_tmpout}" ]]; then + echo "Standard output from the command:" + if grep -q $'\x1e' "${os_cmd_internal_tmpout}"; then + os::cmd::internal::compress_output "${os_cmd_internal_tmpout}" + else + cat "${os_cmd_internal_tmpout}"; echo + fi + else + echo "There was no output from the command." 
+ fi + + if [[ -s "${os_cmd_internal_tmperr}" ]]; then + echo "Standard error from the command:" + if grep -q $'\x1e' "${os_cmd_internal_tmperr}"; then + os::cmd::internal::compress_output "${os_cmd_internal_tmperr}" + else + cat "${os_cmd_internal_tmperr}"; echo + fi + else + echo "There was no error output from the command." + fi +} +readonly -f os::cmd::internal::print_results + +# os::cmd::internal::assemble_causes determines from the two input booleans which part of the test +# failed and generates a nice delimited list of failure causes +function os::cmd::internal::assemble_causes() { + local cmd_succeeded=$1 + local test_succeeded=$2 + + local causes=() + if (( ! cmd_succeeded )); then + causes+=("the command returned the wrong error code") + fi + if (( ! test_succeeded )); then + causes+=("the output content test failed") + fi + + local list + list=$(printf '; %s' "${causes[@]}") + echo "${list:2}" +} +readonly -f os::cmd::internal::assemble_causes + +# os::cmd::internal::run_until_exit_code runs the provided command until the exit code test given +# succeeds or the timeout given runs out. Output from the command to be tested is suppressed unless +# either `VERBOSE=1` or the test fails. This function bypasses any error exiting settings or traps +# set by upstream callers by masking the return code of the command with the return code of setting +# the result variable on failure. +# +# Globals: +# - JUNIT_REPORT_OUTPUT +# - VERBOSE +# Arguments: +# - 1: the command to run +# - 2: command evaluation assertion to use +# - 3: timeout duration +# - 4: interval duration +# Returns: +# - 0: if all assertions met before timeout +# - 1: if timeout occurs +function os::cmd::internal::run_until_exit_code() { + local cmd=$1 + local cmd_eval_func=$2 + local duration=$3 + local interval=$4 + + local -a junit_log + + os::cmd::internal::init_tempdir + os::test::junit::declare_test_start + + local description + description=$(os::cmd::internal::describe_call "${cmd}" "${cmd_eval_func}") + local duration_seconds + duration_seconds=$(echo "scale=3; $(( duration )) / 1000" | bc | xargs printf '%5.3f') + local description="${description}; re-trying every ${interval}s until completion or ${duration_seconds}s" + local preamble="Running ${description}..." + echo "${preamble}" + # for ease of parsing, we want the entire declaration on one line, so we replace '\n' with ';' + junit_log+=( "${description//$'\n'/;}" ) + + local start_time + start_time=$(os::cmd::internal::seconds_since_epoch) + + local deadline=$(( $(date +%s000) + duration )) + local cmd_succeeded=0 + while [ "$(date +%s000)" -lt $deadline ]; do + local cmd_result + cmd_result=$( os::cmd::internal::run_collecting_output "${cmd}"; echo $? ) + cmd_succeeded=$( ${cmd_eval_func} "${cmd_result}"; echo $? 
) + if (( cmd_succeeded )); then + break + fi + sleep "${interval}" + os::cmd::internal::mark_attempt + done + + local end_time + end_time=$(os::cmd::internal::seconds_since_epoch) + local time_elapsed + time_elapsed=$(echo "scale=9; ${end_time} - ${start_time}" | bc | xargs printf '%5.3f') # in decimal seconds, we need leading zeroes for parsing later + + # clear the preamble so we can print out the success or error message + os::text::clear_string "${preamble}" + + local return_code + if (( cmd_succeeded )); then + os::text::print_green "SUCCESS after ${time_elapsed}s: ${description}" + junit_log+=( "SUCCESS after ${time_elapsed}s: ${description//$'\n'/;}" ) + + if [[ -n ${VERBOSE-} ]]; then + os::cmd::internal::print_results + fi + return_code=0 + else + os::text::print_red_bold "FAILURE after ${time_elapsed}s: ${description}: the command timed out" + junit_log+=( "FAILURE after ${time_elapsed}s: ${description//$'\n'/;}: the command timed out" ) + + os::text::print_red "$(os::cmd::internal::print_results)" + return_code=1 + fi + + junit_log+=( "$(os::cmd::internal::print_results)" ) + ( IFS=$'\n'; echo "${junit_log[*]}" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}" ) + os::test::junit::declare_test_end + return "${return_code}" +} +readonly -f os::cmd::internal::run_until_exit_code + +# os::cmd::internal::run_until_text runs the provided command until the assertion function succeeds with +# the given text on the command output or the timeout given runs out. This can be used to run until the +# output does or does not contain some text. Output from the command to be tested is suppressed unless +# either `VERBOSE=1` or the test fails. This function bypasses any error exiting settings or traps +# set by upstream callers by masking the return code of the command with the return code of setting +# the result variable on failure. +# +# Globals: +# - JUNIT_REPORT_OUTPUT +# - VERBOSE +# Arguments: +# - 1: the command to run +# - 2: text to test for +# - 3: text assertion to use +# - 4: timeout duration +# - 5: interval duration +# Returns: +# - 0: if all assertions met before timeout +# - 1: if timeout occurs +function os::cmd::internal::run_until_text() { + local cmd=$1 + local text=$2 + local test_eval_func=${3:-os::cmd::internal::success_func} + local duration=$4 + local interval=$5 + + local -a junit_log + + os::cmd::internal::init_tempdir + os::test::junit::declare_test_start + + local description + description=$(os::cmd::internal::describe_call "${cmd}" "" "${text}" "${test_eval_func}") + local duration_seconds + duration_seconds=$(echo "scale=3; $(( duration )) / 1000" | bc | xargs printf '%5.3f') + local description="${description}; re-trying every ${interval}s until completion or ${duration_seconds}s" + local preamble="Running ${description}..." + echo "${preamble}" + # for ease of parsing, we want the entire declaration on one line, so we replace '\n' with ';' + junit_log+=( "${description//$'\n'/;}" ) + + local start_time + start_time=$(os::cmd::internal::seconds_since_epoch) + + local deadline + deadline=$(( $(date +%s000) + duration )) + local test_succeeded=0 + while [ "$(date +%s000)" -lt $deadline ]; do + local cmd_result= + cmd_result=$( os::cmd::internal::run_collecting_output "${cmd}"; echo $? ) + local test_result + test_result=$( os::cmd::internal::run_collecting_output 'grep -Eq "'"${text}"'" <(os::cmd::internal::get_last_results)'; echo $? ) + test_succeeded=$( ${test_eval_func} "${test_result}"; echo $? 
) + + if (( test_succeeded )); then + break + fi + sleep "${interval}" + os::cmd::internal::mark_attempt + done + + local end_time + end_time=$(os::cmd::internal::seconds_since_epoch) + local time_elapsed + time_elapsed=$(echo "scale=9; ${end_time} - ${start_time}" | bc | xargs printf '%5.3f') # in decimal seconds, we need leading zeroes for parsing later + + # clear the preamble so we can print out the success or error message + os::text::clear_string "${preamble}" + + local return_code + if (( test_succeeded )); then + os::text::print_green "SUCCESS after ${time_elapsed}s: ${description}" + junit_log+=( "SUCCESS after ${time_elapsed}s: ${description//$'\n'/;}" ) + + if [[ -n ${VERBOSE-} ]]; then + os::cmd::internal::print_results + fi + return_code=0 + else + os::text::print_red_bold "FAILURE after ${time_elapsed}s: ${description}: the command timed out" + junit_log+=( "FAILURE after ${time_elapsed}s: ${description//$'\n'/;}: the command timed out" ) + + os::text::print_red "$(os::cmd::internal::print_results)" + return_code=1 + fi + + junit_log+=( "$(os::cmd::internal::print_results)" ) + ( IFS=$'\n'; echo "${junit_log[*]}" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}" ) + os::test::junit::declare_test_end + return "${return_code}" +} +readonly -f os::cmd::internal::run_until_text diff --git a/openshift-hack/lib/constants.sh b/openshift-hack/lib/constants.sh new file mode 100755 index 0000000000000..3552d53115d68 --- /dev/null +++ b/openshift-hack/lib/constants.sh @@ -0,0 +1,324 @@ +#!/usr/bin/env bash + +# This script provides constants for the Golang binary build process + +readonly OS_GO_PACKAGE=github.com/openshift/origin + +readonly OS_BUILD_ENV_GOLANG="${OS_BUILD_ENV_GOLANG:-1.15}" +readonly OS_BUILD_ENV_IMAGE="${OS_BUILD_ENV_IMAGE:-openshift/origin-release:golang-${OS_BUILD_ENV_GOLANG}}" +readonly OS_REQUIRED_GO_VERSION="go${OS_BUILD_ENV_GOLANG}" +readonly OS_GLIDE_MINOR_VERSION="13" +readonly OS_REQUIRED_GLIDE_VERSION="0.$OS_GLIDE_MINOR_VERSION" + +readonly OS_GOFLAGS_TAGS="include_gcs include_oss containers_image_openpgp" +readonly OS_GOFLAGS_TAGS_LINUX_AMD64="gssapi selinux" +readonly OS_GOFLAGS_TAGS_LINUX_S390X="gssapi selinux" +readonly OS_GOFLAGS_TAGS_LINUX_ARM64="gssapi selinux" +readonly OS_GOFLAGS_TAGS_LINUX_PPC64LE="gssapi selinux" + +readonly OS_OUTPUT_BASEPATH="${OS_OUTPUT_BASEPATH:-_output}" +readonly OS_BASE_OUTPUT="${OS_ROOT}/${OS_OUTPUT_BASEPATH}" +readonly OS_OUTPUT_SCRIPTPATH="${OS_OUTPUT_SCRIPTPATH:-"${OS_BASE_OUTPUT}/scripts"}" + +readonly OS_OUTPUT_SUBPATH="${OS_OUTPUT_SUBPATH:-${OS_OUTPUT_BASEPATH}/local}" +readonly OS_OUTPUT="${OS_ROOT}/${OS_OUTPUT_SUBPATH}" +readonly OS_OUTPUT_RELEASEPATH="${OS_OUTPUT}/releases" +readonly OS_OUTPUT_RPMPATH="${OS_OUTPUT_RELEASEPATH}/rpms" +readonly OS_OUTPUT_BINPATH="${OS_OUTPUT}/bin" +readonly OS_OUTPUT_PKGDIR="${OS_OUTPUT}/pkgdir" + +readonly OS_IMAGE_COMPILE_TARGETS_LINUX=( + vendor/k8s.io/kubernetes/cmd/kube-apiserver + vendor/k8s.io/kubernetes/cmd/kube-controller-manager + vendor/k8s.io/kubernetes/cmd/kube-scheduler + vendor/k8s.io/kubernetes/cmd/kubelet +) +readonly OS_SCRATCH_IMAGE_COMPILE_TARGETS_LINUX=( + "" +) +readonly OS_IMAGE_COMPILE_BINARIES=("${OS_SCRATCH_IMAGE_COMPILE_TARGETS_LINUX[@]##*/}" "${OS_IMAGE_COMPILE_TARGETS_LINUX[@]##*/}") + +readonly OS_GOVET_BLACKLIST=( +) + +#If you update this list, be sure to get the images/origin/Dockerfile +readonly OS_BINARY_RELEASE_SERVER_LINUX=( + './*' +) +readonly OS_BINARY_RELEASE_CLIENT_EXTRA=( + ${OS_ROOT}/README.md + ${OS_ROOT}/LICENSE +) + +# os::build::get_product_vars 
exports variables that we expect to change +# depending on the distribution of Origin +function os::build::get_product_vars() { + export OS_BUILD_LDFLAGS_IMAGE_PREFIX="${OS_IMAGE_PREFIX:-"openshift/origin"}" + export OS_BUILD_LDFLAGS_DEFAULT_IMAGE_STREAMS="${OS_BUILD_LDFLAGS_DEFAULT_IMAGE_STREAMS:-"centos7"}" +} + +# os::build::ldflags calculates the -ldflags argument for building OpenShift +function os::build::ldflags() { + # Run this in a subshell to prevent settings/variables from leaking. + set -o errexit + set -o nounset + set -o pipefail + + cd "${OS_ROOT}" + + os::build::version::get_vars + os::build::get_product_vars + + local buildDate="$(date -u +'%Y-%m-%dT%H:%M:%SZ')" + + declare -a ldflags=( + "-s" + "-w" + ) + + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.majorFromGit" "${OS_GIT_MAJOR}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.minorFromGit" "${OS_GIT_MINOR}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.versionFromGit" "${OS_GIT_VERSION}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.commitFromGit" "${OS_GIT_COMMIT}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.gitTreeState" "${OS_GIT_TREE_STATE}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.buildDate" "${buildDate}")) + + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/component-base/version.gitMajor" "${KUBE_GIT_MAJOR}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/component-base/version.gitMinor" "${KUBE_GIT_MINOR}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/component-base/version.gitCommit" "${OS_GIT_COMMIT}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/component-base/version.gitVersion" "${KUBE_GIT_VERSION}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/component-base/version.buildDate" "${buildDate}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/component-base/version.gitTreeState" "clean")) + + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.gitMajor" "${KUBE_GIT_MAJOR}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.gitMinor" "${KUBE_GIT_MINOR}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.gitCommit" "${OS_GIT_COMMIT}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.gitVersion" "${KUBE_GIT_VERSION}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.buildDate" "${buildDate}")) + ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.gitTreeState" "clean") +) + + # The -ldflags parameter takes a single string, so join the output. + echo "${ldflags[*]-}" +} +readonly -f os::build::ldflags + +# os::util::list_go_src_files lists files we consider part of our project +# source code, useful for tools that iterate over source to provide vet- +# ting or linting, etc. +# +# Globals: +# None +# Arguments: +# None +# Returns: +# None +function os::util::list_go_src_files() { + find . 
-not \( \
+        \( \
+        -wholename './_output' \
+        -o -wholename './.*' \
+        -o -wholename './pkg/assets/bindata.go' \
+        -o -wholename './pkg/assets/*/bindata.go' \
+        -o -wholename './pkg/oc/clusterup/manifests/bindata.go' \
+        -o -wholename './openshift.local.*' \
+        -o -wholename './test/extended/testdata/bindata.go' \
+        -o -wholename '*/vendor/*' \
+        -o -wholename './assets/bower_components/*' \
+        \) -prune \
+    \) -name '*.go' | sort -u
+}
+readonly -f os::util::list_go_src_files
+
+# os::util::list_go_src_dirs lists dirs in origin/ and cmd/ dirs excluding
+# doc.go, useful for tools that iterate over source to provide vetting or
+# linting, or for godep-save etc.
+#
+# Globals:
+#  None
+# Arguments:
+#  None
+# Returns:
+#  None
+function os::util::list_go_src_dirs() {
+    go list -e ./... | grep -Ev "/(third_party|vendor|staging|clientset_generated)/" | LC_ALL=C sort -u
+}
+readonly -f os::util::list_go_src_dirs
+
+# os::util::list_go_deps outputs the list of dependencies for the project.
+function os::util::list_go_deps() {
+    go list -f '{{.ImportPath}}{{.Imports}}' ./test/... ./pkg/... ./cmd/... ./vendor/k8s.io/... | tr '[]' ' ' |
+        sed -e 's|github.com/openshift/origin/vendor/||g' |
+        sed -e 's|k8s.io/kubernetes/staging/src/||g'
+}
+
+# os::util::list_test_packages_under lists all packages containing Golang test files that we
+# want to run as unit tests under the given base dir in the source tree
+function os::util::list_test_packages_under() {
+    local basedir=$*
+
+    # we do not quote ${basedir} to allow for multiple arguments to be passed in as well as to allow for
+    # arguments that use expansion, e.g. paths containing brace expansion or wildcards
+    find ${basedir} -not \( \
+        \( \
+        -path 'vendor' \
+        -o -path '*_output' \
+        -o -path '*.git' \
+        -o -path '*openshift.local.*' \
+        -o -path '*vendor/*' \
+        -o -path '*assets/node_modules' \
+        -o -path '*test/*' \
+        -o -path '*pkg/proxy' \
+        -o -path '*k8s.io/kubernetes/cluster/gce*' \
+        \) -prune \
+    \) -name '*_test.go' | xargs -n1 dirname | sort -u | xargs -n1 printf "${OS_GO_PACKAGE}/%s\n"
+
+    local kubernetes_path="vendor/k8s.io/kubernetes"
+
+    if [[ -n "${TEST_KUBE-}" ]]; then
+        # we need to find all of the kubernetes test suites, excluding those we directly whitelisted before, the end-to-end suite, and
+        # cmd wasn't done before using glide and constantly flakes
+        # the forked etcd packages are used only by the gce etcd containers
+        find -L vendor/k8s.io/{api,apimachinery,apiserver,client-go,kube-aggregator,kubernetes} -not \( \
+            \( \
+            -path "${kubernetes_path}/staging" \
+            -o -path "${kubernetes_path}/cmd" \
+            -o -path "${kubernetes_path}/test" \
+            -o -path "${kubernetes_path}/third_party/forked/etcd*" \
+            -o -path "${kubernetes_path}/cluster/gce" \
+            \) -prune \
+        \) -name '*_test.go' | cut -f 2- -d / | xargs -n1 dirname | sort -u | xargs -n1 printf "${OS_GO_PACKAGE}/vendor/%s\n"
+    else
+        echo "${OS_GO_PACKAGE}/vendor/k8s.io/api/..."
+        echo "${OS_GO_PACKAGE}/vendor/k8s.io/kubernetes/pkg/api/..."
+        echo "${OS_GO_PACKAGE}/vendor/k8s.io/kubernetes/pkg/apis/..."
+    fi
+}
+readonly -f os::util::list_test_packages_under
+
+# Generates the .syso file used to add compile-time VERSIONINFO metadata to the
+# Windows binary.
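+# The JSON written below is input for the goversioninfo tool
+# (github.com/josephspurrier/goversioninfo), which must be on PATH; it
+# compiles the metadata into a .syso object file that the Go linker picks up
+# automatically when building oc.exe.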
+function os::build::generate_windows_versioninfo() {
+  os::build::version::get_vars
+  local major="${OS_GIT_MAJOR}"
+  local minor="${OS_GIT_MINOR%+}"
+  local patch="${OS_GIT_PATCH}"
+  local windows_versioninfo_file
+  windows_versioninfo_file="$(mktemp --suffix=".versioninfo.json")"
+  cat <<EOF >"${windows_versioninfo_file}"
+{
+  "FixedFileInfo":
+  {
+    "FileVersion": {
+      "Major": ${major},
+      "Minor": ${minor},
+      "Patch": ${patch}
+    },
+    "ProductVersion": {
+      "Major": ${major},
+      "Minor": ${minor},
+      "Patch": ${patch}
+    },
+    "FileFlagsMask": "3f",
+    "FileFlags": "00",
+    "FileOS": "040004",
+    "FileType": "01",
+    "FileSubType": "00"
+  },
+  "StringFileInfo":
+  {
+    "Comments": "",
+    "CompanyName": "Red Hat, Inc.",
+    "FileDescription": "openshift client",
+    "FileVersion": "${OS_GIT_VERSION}",
+    "InternalName": "oc",
+    "LegalCopyright": "© Red Hat, Inc. Licensed under the Apache License, Version 2.0",
+    "LegalTrademarks": "",
+    "OriginalFilename": "oc.exe",
+    "PrivateBuild": "",
+    "ProductName": "OpenShift Client",
+    "ProductVersion": "${OS_GIT_VERSION}",
+    "SpecialBuild": ""
+  },
+  "VarFileInfo":
+  {
+    "Translation": {
+      "LangID": "0409",
+      "CharsetID": "04B0"
+    }
+  }
+}
+EOF
+  goversioninfo -o "${OS_ROOT}/vendor/github.com/openshift/oc/cmd/oc/oc.syso" "${windows_versioninfo_file}"
+}
+readonly -f os::build::generate_windows_versioninfo
+
+# Removes the .syso file used to add compile-time VERSIONINFO metadata to the
+# Windows binary.
+function os::build::clean_windows_versioninfo() {
+  rm "${OS_ROOT}/vendor/github.com/openshift/oc/cmd/oc/oc.syso"
+}
+readonly -f os::build::clean_windows_versioninfo
+
+# OS_ALL_IMAGES is the list of images built by os::build::images.
+readonly OS_ALL_IMAGES=(
+  origin-hyperkube
+  origin-tests
+)
+
+# os::build::check_binaries ensures that binary sizes do not grow without approval.
+function os::build::check_binaries() {
+  platform=$(os::build::host_platform)
+  if [[ "${platform}" != "linux/amd64" && "${platform}" != "darwin/amd64" ]]; then
+    return 0
+  fi
+  duexe="du"
+
+  # On OSX, the 'du' binary does not provide the --apparent-size flag. However, the
+  # Homebrew GNU coreutils package provides a 'gdu' binary equivalent to Linux du.
+  # For now, if the 'gdu' binary is not installed, print a warning and skip the
+  # binary size check (CI will capture a possible violation anyway).
+  if [[ "${platform}" == "darwin/amd64" ]]; then
+    duexe=$(which gdu || true)
+    if [[ -z "${duexe}" ]]; then
+      os::log::warning "Unable to locate 'gdu' binary to determine size of the binary. Please install it using: 'brew install coreutils'"
+      return 0
+    fi
+  fi
+
+  if [[ -f "${OS_OUTPUT_BINPATH}/${platform}/pod" ]]; then
+    size=$($duexe --apparent-size -m "${OS_OUTPUT_BINPATH}/${platform}/pod" | cut -f 1)
+    if [[ "${size}" -gt "2" ]]; then
+      os::log::fatal "pod binary has grown substantially to ${size}. You must have approval before bumping this limit."
+    fi
+  fi
+}
+
+# os::build::images builds all images in this repo.
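+# An illustrative invocation, assuming the build libraries have been sourced
+# via openshift-hack/lib/init.sh and the binaries are already built:
+#   OS_IMAGE_PREFIX="openshift/origin" os::build::images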
+function os::build::images() {
+  # Create link to file if the FS supports hardlinks, otherwise copy the file
+  function ln_or_cp {
+    local src_file=$1
+    local dst_dir=$2
+    if os::build::archive::internal::is_hardlink_supported "${dst_dir}" ; then
+      ln -f "${src_file}" "${dst_dir}"
+    else
+      cp -pf "${src_file}" "${dst_dir}"
+    fi
+  }
+
+  # determine the correct tag prefix
+  tag_prefix="${OS_IMAGE_PREFIX:-"openshift/origin"}"
+
+  # images that depend on "${tag_prefix}-source" or "${tag_prefix}-base"
+  ( os::build::image "${tag_prefix}-hyperkube" images/hyperkube ) &
+
+  for i in $(jobs -p); do wait "$i"; done
+
+  # images that depend on "${tag_prefix}-cli" or hyperkube
+  ( os::build::image "${tag_prefix}-tests" images/tests ) &
+
+  for i in $(jobs -p); do wait "$i"; done
+}
+readonly -f os::build::images
diff --git a/openshift-hack/lib/deps.sh b/openshift-hack/lib/deps.sh
new file mode 100644
index 0000000000000..6a9009823de1e
--- /dev/null
+++ b/openshift-hack/lib/deps.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# os::deps::path_with_shellcheck returns a path that includes shellcheck.
+#
+# Globals:
+#  None
+# Arguments:
+#  None
+# Returns:
+#  The path that includes shellcheck.
+function os::deps::path_with_shellcheck() {
+  local path="${PATH}"
+  if ! which shellcheck &> /dev/null; then
+    local shellcheck_path="${TMPDIR:-/tmp}/shellcheck"
+    mkdir -p "${shellcheck_path}"
+    pushd "${shellcheck_path}" > /dev/null || exit 1
+    # This version needs to match that required by
+    # hack/verify-shellcheck.sh to avoid the use of docker.
+    local version="v0.7.0"
+    local tar_file="shellcheck-${version}.linux.x86_64.tar.xz"
+    curl -LO "https://github.com/koalaman/shellcheck/releases/download/${version}/${tar_file}"
+    tar xf "${tar_file}"
+    path="${PATH}:$(pwd)/shellcheck-${version}"
+    popd > /dev/null || exit 1
+  fi
+  echo "${path}"
+}
+readonly -f os::deps::path_with_shellcheck
diff --git a/openshift-hack/lib/init.sh b/openshift-hack/lib/init.sh
new file mode 100755
index 0000000000000..00321b0ff7137
--- /dev/null
+++ b/openshift-hack/lib/init.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+
+# This script is meant to be the entrypoint for OpenShift Bash scripts to import all of the support
+# libraries at once in order to make Bash script preambles as minimal as possible. This script
+# recursively `source`s *.sh files in this directory tree. As such, no files should be `source`ed outside
+# of this script to ensure that we do not attempt to overwrite read-only variables.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+OS_SCRIPT_START_TIME="$( date +%s )"; export OS_SCRIPT_START_TIME
+
+# os::util::absolute_path returns the absolute path to the directory provided
+function os::util::absolute_path() {
+  local relative_path="$1"
+  local absolute_path
+
+  pushd "${relative_path}" >/dev/null
+  relative_path="$( pwd )"
+  if [[ -h "${relative_path}" ]]; then
+    absolute_path="$( readlink "${relative_path}" )"
+  else
+    absolute_path="${relative_path}"
+  fi
+  popd >/dev/null
+
+  echo "${absolute_path}"
+}
+readonly -f os::util::absolute_path
+
+# find the absolute path to the root of the Origin source tree
+init_source="$( dirname "${BASH_SOURCE[0]}" )/../.."
+OS_ROOT="$( os::util::absolute_path "${init_source}" )" +export OS_ROOT +cd "${OS_ROOT}" + +for library_file in $( find "${OS_ROOT}/openshift-hack/lib" -type f -name '*.sh' -not -path '*/openshift-hack/lib/init.sh' ); do + source "${library_file}" +done + +unset library_files library_file init_source + +# all of our Bash scripts need to have the stacktrace +# handler installed to deal with errors +os::log::stacktrace::install + +# All of our Bash scripts need to have access to the +# binaries that we build so we don't have to find +# them before every invocation. +os::util::environment::update_path_var + +if [[ -z "${OS_TMP_ENV_SET-}" ]]; then + # if this file is run via 'source', then $0 will be "-bash" and won't work with basename + if [[ "${0}" =~ .*\.sh ]]; then + os::util::environment::setup_tmpdir_vars "$( basename "${0}" ".sh" )" + else + os::util::environment::setup_tmpdir_vars "shell" + fi +fi + +# Allow setting $JUNIT_REPORT to toggle output behavior +if [[ -n "${JUNIT_REPORT:-}" ]]; then + export JUNIT_REPORT_OUTPUT="${LOG_DIR}/raw_test_output.log" +fi + +# Use the go version from the system +export FORCE_HOST_GO=1 diff --git a/openshift-hack/lib/log/output.sh b/openshift-hack/lib/log/output.sh new file mode 100644 index 0000000000000..103fa1ff1bee5 --- /dev/null +++ b/openshift-hack/lib/log/output.sh @@ -0,0 +1,104 @@ +#!/usr/bin/env bash + +# This file contains functions used for writing log messages +# to stdout and stderr from scripts while they run. + +# os::log::info writes the message to stdout. +# +# Arguments: +# - all: message to write +function os::log::info() { + local message; message="$( os::log::internal::prefix_lines "[INFO]" "$*" )" + os::log::internal::to_logfile "${message}" + echo "${message}" +} +readonly -f os::log::info + +# os::log::warning writes the message to stderr. +# A warning indicates something went wrong but +# not so wrong that we cannot recover. +# +# Arguments: +# - all: message to write +function os::log::warning() { + local message; message="$( os::log::internal::prefix_lines "[WARNING]" "$*" )" + os::log::internal::to_logfile "${message}" + os::text::print_yellow "${message}" 1>&2 +} +readonly -f os::log::warning + +# os::log::error writes the message to stderr. +# An error indicates that something went wrong +# and we will most likely fail after this. +# +# Arguments: +# - all: message to write +function os::log::error() { + local message; message="$( os::log::internal::prefix_lines "[ERROR]" "$*" )" + os::log::internal::to_logfile "${message}" + os::text::print_red "${message}" 1>&2 +} +readonly -f os::log::error + +# os::log::fatal writes the message to stderr and +# returns a non-zero code to force a process exit. +# A fatal error indicates that there is no chance +# of recovery. +# +# Arguments: +# - all: message to write +function os::log::fatal() { + local message; message="$( os::log::internal::prefix_lines "[FATAL]" "$*" )" + os::log::internal::to_logfile "${message}" + os::text::print_red "${message}" 1>&2 + exit 1 +} +readonly -f os::log::fatal + +# os::log::debug writes the message to stderr if +# the ${OS_DEBUG} variable is set. 
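+# For example (illustrative), invoking a script as:
+#   OS_DEBUG=1 ./openshift-hack/test-go.sh
+# causes these [DEBUG] messages to be printed to stderr.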
+# +# Globals: +# - OS_DEBUG +# Arguments: +# - all: message to write +function os::log::debug() { + local message; message="$( os::log::internal::prefix_lines "[DEBUG]" "$*" )" + os::log::internal::to_logfile "${message}" + if [[ -n "${OS_DEBUG:-}" ]]; then + os::text::print_blue "${message}" 1>&2 + fi +} +readonly -f os::log::debug + +# os::log::internal::to_logfile makes a best-effort +# attempt to write the message to the script logfile +# +# Globals: +# - LOG_DIR +# Arguments: +# - all: message to write +function os::log::internal::to_logfile() { + if [[ -n "${LOG_DIR:-}" && -d "${LOG_DIR-}" ]]; then + echo "$*" >>"${LOG_DIR}/scripts.log" + fi +} + +# os::log::internal::prefix_lines prints out the +# original content with the given prefix at the +# start of every line. +# +# Arguments: +# - 1: prefix for lines +# - 2: content to prefix +function os::log::internal::prefix_lines() { + local prefix="$1" + local content="$2" + + local old_ifs="${IFS}" + IFS=$'\n' + for line in ${content}; do + echo "${prefix} ${line}" + done + IFS="${old_ifs}" +} \ No newline at end of file diff --git a/openshift-hack/lib/log/stacktrace.sh b/openshift-hack/lib/log/stacktrace.sh new file mode 100644 index 0000000000000..e9915efb6342f --- /dev/null +++ b/openshift-hack/lib/log/stacktrace.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash +# +# This library contains an implementation of a stack trace for Bash scripts. + +# os::log::stacktrace::install installs the stacktrace as a handler for the ERR signal if one +# has not already been installed and sets `set -o errtrace` in order to propagate the handler +# If the ERR trap is not initialized, installing this plugin will initialize it. +# +# Globals: +# None +# Arguments: +# None +# Returns: +# - export OS_USE_STACKTRACE +function os::log::stacktrace::install() { + # setting 'errtrace' propagates our ERR handler to functions, expansions and subshells + set -o errtrace + + # OS_USE_STACKTRACE is read by os::util::trap at runtime to request a stacktrace + export OS_USE_STACKTRACE=true + + os::util::trap::init_err +} +readonly -f os::log::stacktrace::install + +# os::log::stacktrace::print prints the stacktrace and exits with the return code from the script that +# called for a stack trace. This function will always return 0 if it is not handling the signal, and if it +# is handling the signal, this function will always `exit`, not return, the return code it receives as +# its first argument. 
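+# For example, the ERR handler (os::util::trap::err_handler in util/trap.sh)
+# invokes it as:
+#   os::log::stacktrace::print "${return_code}" "${last_command}" "${errexit_set:-}"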
+#
+# Globals:
+#  - BASH_SOURCE
+#  - BASH_LINENO
+#  - FUNCNAME
+# Arguments:
+#  - 1: the return code of the command in the script that generated the ERR signal
+#  - 2: the last command that ran before handlers were invoked
+#  - 3: whether or not `set -o errexit` was set in the script that generated the ERR signal
+# Returns:
+#  None
+function os::log::stacktrace::print() {
+  local return_code=$1
+  local last_command=$2
+  local errexit_set=${3:-}
+
+  if [[ "${return_code}" = "0" ]]; then
+    # we're not supposed to respond when no error has occurred
+    return 0
+  fi
+
+  if [[ -z "${errexit_set}" ]]; then
+    # if errexit wasn't set in the shell when the ERR signal was issued, then we can ignore the signal
+    # as this is not cause for failure
+    return 0
+  fi
+
+  # dump the entire stack for debugging purposes
+  os::log::debug "$( os::util::repository_relative_path "${BASH_SOURCE[0]}:${LINENO}: ${BASH_COMMAND}" )"
+  for (( i = 0; i < ${#BASH_LINENO[@]}; i++ )); do
+    os::log::debug "$( os::util::repository_relative_path "${BASH_SOURCE[$i+1]:-"$( os::util::repository_relative_path "$0" )"}" ):${BASH_LINENO[$i]}: ${FUNCNAME[$i]}"
+  done
+
+  # iterate backwards through the stack until we leave library files, so we can be sure we start logging
+  # actual script code and not this handler's call
+  local stack_begin_index
+  for (( stack_begin_index = 0; stack_begin_index < ${#BASH_SOURCE[@]}; stack_begin_index++ )); do
+    if [[ ! "${BASH_SOURCE[${stack_begin_index}]}" =~ hack/lib/(log/stacktrace|util/trap)\.sh ]]; then
+      break
+    fi
+  done
+
+  local preamble_finished
+  local stack_index=1
+  local i
+  for (( i = stack_begin_index; i < ${#BASH_SOURCE[@]}; i++ )); do
+    local bash_source
+    bash_source="$( os::util::repository_relative_path "${BASH_SOURCE[$i]}" )"
+    if [[ -z "${preamble_finished:-}" ]]; then
+      preamble_finished=true
+      os::log::error "${bash_source}:${BASH_LINENO[$i-1]}: \`${last_command}\` exited with status ${return_code}." >&2
+    else
+      os::log::info "  ${stack_index}: ${bash_source}:${BASH_LINENO[$i-1]}: ${FUNCNAME[$i-1]}" >&2
+    fi
+    stack_index=$(( stack_index + 1 ))
+  done
+
+  # we know we're the privileged handler in this chain, so we can safely exit the shell without
+  # starving another handler of the privilege of reacting to this signal
+  os::log::info "  Exiting with code ${return_code}." >&2
+  exit "${return_code}"
+}
+readonly -f os::log::stacktrace::print
diff --git a/openshift-hack/lib/test/junit.sh b/openshift-hack/lib/test/junit.sh
new file mode 100644
index 0000000000000..18bb3ee857d2c
--- /dev/null
+++ b/openshift-hack/lib/test/junit.sh
@@ -0,0 +1,202 @@
+#!/usr/bin/env bash
+# This utility file contains functions that format test output to be parsed into jUnit XML
+
+# os::test::junit::declare_suite_start prints a message declaring the start of a test suite
+# Any number of suites can be in flight at any time, so there is no failure condition for this
+# script based on the number of suites in flight.
+#
+# Globals:
+#  - JUNIT_REPORT_OUTPUT
+#  - NUM_OS_JUNIT_SUITES_IN_FLIGHT
+# Arguments:
+#  - 1: the suite name that is starting
+# Returns:
+#  - increment NUM_OS_JUNIT_SUITES_IN_FLIGHT
+function os::test::junit::declare_suite_start() {
+  local suite_name=$1
+  local num_suites=${NUM_OS_JUNIT_SUITES_IN_FLIGHT:-0}
+
+  echo "=== BEGIN TEST SUITE github.com/openshift/origin/test/${suite_name} ===" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}"
+  NUM_OS_JUNIT_SUITES_IN_FLIGHT=$(( num_suites + 1 ))
+  export NUM_OS_JUNIT_SUITES_IN_FLIGHT
+}
+readonly -f os::test::junit::declare_suite_start
+
+# os::test::junit::declare_suite_end prints a message declaring the end of a test suite
+# If there aren't any suites in flight, this function will fail.
+#
+# Globals:
+#  - JUNIT_REPORT_OUTPUT
+#  - NUM_OS_JUNIT_SUITES_IN_FLIGHT
+# Arguments:
+#  None
+# Returns:
+#  - export/decrement NUM_OS_JUNIT_SUITES_IN_FLIGHT
+function os::test::junit::declare_suite_end() {
+  local num_suites=${NUM_OS_JUNIT_SUITES_IN_FLIGHT:-0}
+  if [[ "${num_suites}" -lt "1" ]]; then
+    # we can't end a suite if none have been started yet
+    echo "[ERROR] jUnit suite marker could not be placed, expected suites in flight, got ${num_suites}"
+    return 1
+  fi
+
+  echo "=== END TEST SUITE ===" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}"
+  NUM_OS_JUNIT_SUITES_IN_FLIGHT=$(( num_suites - 1 ))
+  export NUM_OS_JUNIT_SUITES_IN_FLIGHT
+}
+readonly -f os::test::junit::declare_suite_end
+
+# os::test::junit::declare_test_start prints a message declaring the start of a test case
+# If there is already a test marked as being in flight, this function will fail.
+#
+# Globals:
+#  - JUNIT_REPORT_OUTPUT
+#  - NUM_OS_JUNIT_TESTS_IN_FLIGHT
+# Arguments:
+#  None
+# Returns:
+#  - increment NUM_OS_JUNIT_TESTS_IN_FLIGHT
+function os::test::junit::declare_test_start() {
+  local num_tests=${NUM_OS_JUNIT_TESTS_IN_FLIGHT:-0}
+  if [[ "${num_tests}" -ne "0" ]]; then
+    # someone's declaring the start of a test when a test is already in flight
+    echo "[ERROR] jUnit test marker could not be placed, expected no tests in flight, got ${num_tests}"
+    return 1
+  fi
+
+  local num_suites=${NUM_OS_JUNIT_SUITES_IN_FLIGHT:-0}
+  if [[ "${num_suites}" -lt "1" ]]; then
+    # we can't start a test if no suites are in flight
+    echo "[ERROR] jUnit test marker could not be placed, expected suites in flight, got ${num_suites}"
+    return 1
+  fi
+
+  echo "=== BEGIN TEST CASE ===" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}"
+  NUM_OS_JUNIT_TESTS_IN_FLIGHT=$(( num_tests + 1 ))
+  export NUM_OS_JUNIT_TESTS_IN_FLIGHT
+}
+readonly -f os::test::junit::declare_test_start
+
+# os::test::junit::declare_test_end prints a message declaring the end of a test case
+# If there is no test marked as being in flight, this function will fail.
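+# A typical marker sequence in a test script pairs these calls (illustrative):
+#   os::test::junit::declare_suite_start "cmd/example"
+#   os::test::junit::declare_test_start
+#   # ... run one test case ...
+#   os::test::junit::declare_test_end
+#   os::test::junit::declare_suite_end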
+# +# Globals: +# - JUNIT_REPORT_OUTPUT +# - NUM_OS_JUNIT_TESTS_IN_FLIGHT +# Arguments: +# None +# Returns: +# - decrement NUM_OS_JUNIT_TESTS_IN_FLIGHT +function os::test::junit::declare_test_end() { + local num_tests=${NUM_OS_JUNIT_TESTS_IN_FLIGHT:-0} + if [[ "${num_tests}" -ne "1" ]]; then + # someone's declaring the end of a test when a test is not in flight + echo "[ERROR] jUnit test marker could not be placed, expected one test in flight, got ${num_tests}" + return 1 + fi + + echo "=== END TEST CASE ===" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}" + NUM_OS_JUNIT_TESTS_IN_FLIGHT=$(( num_tests - 1 )) + export NUM_OS_JUNIT_TESTS_IN_FLIGHT +} +readonly -f os::test::junit::declare_test_end + +# os::test::junit::check_test_counters checks that we do not have any test suites or test cases in flight +# This function should be called at the very end of any test script using jUnit markers to make sure no error in +# marking has occurred. +# +# Globals: +# - NUM_OS_JUNIT_SUITES_IN_FLIGHT +# - NUM_OS_JUNIT_TESTS_IN_FLIGHT +# Arguments: +# None +# Returns: +# None +function os::test::junit::check_test_counters() { + if [[ "${NUM_OS_JUNIT_SUITES_IN_FLIGHT-}" -ne "0" ]]; then + echo "[ERROR] Expected no test suites to be marked as in-flight at the end of testing, got ${NUM_OS_JUNIT_SUITES_IN_FLIGHT-}" + return 1 + elif [[ "${NUM_OS_JUNIT_TESTS_IN_FLIGHT-}" -ne "0" ]]; then + echo "[ERROR] Expected no test cases to be marked as in-flight at the end of testing, got ${NUM_OS_JUNIT_TESTS_IN_FLIGHT-}" + return 1 + fi +} +readonly -f os::test::junit::check_test_counters + +# os::test::junit::reconcile_output appends the necessary suite and test end statements to the jUnit output file +# in order to ensure that the file is in a consistent state to allow for parsing +# +# Globals: +# - NUM_OS_JUNIT_SUITES_IN_FLIGHT +# - NUM_OS_JUNIT_TESTS_IN_FLIGHT +# Arguments: +# None +# Returns: +# None +function os::test::junit::reconcile_output() { + if [[ "${NUM_OS_JUNIT_TESTS_IN_FLIGHT:-0}" = "1" ]]; then + os::test::junit::declare_test_end + fi + + for (( i = 0; i < ${NUM_OS_JUNIT_SUITES_IN_FLIGHT:-0}; i++ )); do + os::test::junit::declare_suite_end + done +} +readonly -f os::test::junit::reconcile_output + +# os::test::junit::generate_report determines which type of report is to +# be generated and does so from the raw output of the tests. +# +# Globals: +# - JUNIT_REPORT_OUTPUT +# - ARTIFACT_DIR +# Arguments: +# None +# Returns: +# None +function os::test::junit::generate_report() { + if [[ -z "${JUNIT_REPORT_OUTPUT:-}" || + -n "${JUNIT_REPORT_OUTPUT:-}" && ! -s "${JUNIT_REPORT_OUTPUT:-}" ]]; then + # we can't generate a report + return 0 + fi + + if grep -q "=== END TEST CASE ===" "${JUNIT_REPORT_OUTPUT}"; then + os::test::junit::reconcile_output + os::test::junit::check_test_counters + os::test::junit::internal::generate_report "oscmd" + fi +} + +# os::test::junit::internal::generate_report generates an XML jUnit +# report for either `os::cmd` or `go test`, based on the passed +# argument. If the `junitreport` binary is not present, it will be built. 
+#
+# Globals:
+#  - JUNIT_REPORT_OUTPUT
+#  - ARTIFACT_DIR
+# Arguments:
+#  - 1: specify which type of tests command output should junitreport read
+# Returns:
+#  export JUNIT_REPORT_NUM_FAILED
+function os::test::junit::internal::generate_report() {
+  local report_type="$1"
+  os::util::ensure::built_binary_exists 'junitreport'
+
+  local report_file
+  report_file="$( mktemp "${ARTIFACT_DIR}/${report_type}_report_XXXXX" ).xml"
+  os::log::info "jUnit XML report placed at $( os::util::repository_relative_path "${report_file}" )"
+  junitreport --type "${report_type}" \
+    --suites nested \
+    --roots github.com/openshift/origin \
+    --output "${report_file}" \
+    <"${JUNIT_REPORT_OUTPUT}"
+
+  local summary
+  summary=$( junitreport summarize <"${report_file}" )
+
+  JUNIT_REPORT_NUM_FAILED="$( grep -oE "[0-9]+ failed" <<<"${summary}" )"
+  export JUNIT_REPORT_NUM_FAILED
+
+  echo "${summary}"
+}
diff --git a/openshift-hack/lib/util/ensure.sh b/openshift-hack/lib/util/ensure.sh
new file mode 100644
index 0000000000000..158d94f984f01
--- /dev/null
+++ b/openshift-hack/lib/util/ensure.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+
+# This script contains helper functions for ensuring that dependencies
+# exist on a host system that are required to run Origin scripts.
+
+# os::util::ensure::system_binary_exists ensures that the
+# given binary exists on the system in the $PATH.
+#
+# Globals:
+#  None
+# Arguments:
+#  - 1: binary to search for
+# Returns:
+#  None
+function os::util::ensure::system_binary_exists() {
+  local binary="$1"
+
+  if ! os::util::find::system_binary "${binary}" >/dev/null 2>&1; then
+    os::log::fatal "Required \`${binary}\` binary was not found in \$PATH."
+  fi
+}
+readonly -f os::util::ensure::system_binary_exists
+
+# os::util::ensure::built_binary_exists ensures that the
+# given binary exists on the system in the local output
+# directory for the current platform. If it doesn't, we
+# will attempt to build it if we can determine the correct
+# hack/build-go.sh target for the binary.
+#
+# This function will attempt to determine the correct
+# hack/build-go.sh target for the binary, but may not
+# be able to do so if the target doesn't live under
+# cmd/ or tools/. In that case, one should be given.
+#
+# Globals:
+#  - OS_ROOT
+# Arguments:
+#  - 1: binary to search for
+#  - 2: optional build target for this binary
+# Returns:
+#  None
function os::util::ensure::built_binary_exists() {
+  local binary="$1"
+  local target="${2:-}"
+
+  if ! os::util::find::built_binary "${binary}" >/dev/null 2>&1; then
+    if [[ -z "${target}" ]]; then
+      if [[ -d "${OS_ROOT}/cmd/${binary}" ]]; then
+        target="cmd/${binary}"
+      elif [[ -d "${OS_ROOT}/tools/${binary}" ]]; then
+        target="tools/${binary}"
+      elif [[ -d "${OS_ROOT}/openshift-hack/${binary}" ]]; then
+        target="openshift-hack/${binary}"
+      fi
+    fi
+
+    if [[ -n "${target}" ]]; then
+      os::log::info "No compiled \`${binary}\` binary was found. Attempting to build one using:
+  $ hack/build-go.sh ${target}"
+      "${OS_ROOT}/hack/build-go.sh" "${target}"
+    else
+      os::log::fatal "No compiled \`${binary}\` binary was found and no build target could be determined.
+Provide the binary and try running $0 again."
+    fi
+  fi
+}
+readonly -f os::util::ensure::built_binary_exists
+
+# os::util::ensure::gopath_binary_exists ensures that the
+# given binary exists on the system in $GOPATH. If it
+# doesn't, we will attempt to build it if we can determine
+# the correct install path for the binary.
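+# An illustrative call (the binary and install path are examples only):
+#   os::util::ensure::gopath_binary_exists 'golint' 'golang.org/x/lint/golint'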
+# +# Globals: +# - GOPATH +# Arguments: +# - 1: binary to search for +# - 2: [optional] path to install from +# Returns: +# None +function os::util::ensure::gopath_binary_exists() { + local binary="$1" + local install_path="${2:-}" + + if ! os::util::find::gopath_binary "${binary}" >/dev/null 2>&1; then + if [[ -n "${install_path:-}" ]]; then + os::log::info "No installed \`${binary}\` was found in \$GOPATH. Attempting to install using: + $ go get ${install_path}" + go get "${install_path}" + else + os::log::fatal "Required \`${binary}\` binary was not found in \$GOPATH." + fi + fi +} +readonly -f os::util::ensure::gopath_binary_exists + +# os::util::ensure::iptables_privileges_exist tests if the +# testing machine has iptables available and in PATH. Also +# tests that the user can list iptables rules, trying with +# `sudo` if it fails without. +# +# Globals: +# None +# Arguments: +# None +# Returns: +# None +function os::util::ensure::iptables_privileges_exist() { + os::util::ensure::system_binary_exists 'iptables' + + if ! iptables --list >/dev/null 2>&1 && ! sudo iptables --list >/dev/null 2>&1; then + os::log::fatal "You do not have \`iptables\` or \`sudo\` privileges. Kubernetes services will not work +without \`iptables\` access. See https://github.com/kubernetes/kubernetes/issues/1859." + fi +} +readonly -f os::util::ensure::iptables_privileges_exist diff --git a/openshift-hack/lib/util/environment.sh b/openshift-hack/lib/util/environment.sh new file mode 100644 index 0000000000000..1b0d55c7c471a --- /dev/null +++ b/openshift-hack/lib/util/environment.sh @@ -0,0 +1,296 @@ +#!/usr/bin/env bash + +# This script holds library functions for setting up the shell environment for OpenShift scripts + +# os::util::environment::use_sudo updates $USE_SUDO to be 'true', so that later scripts choosing between +# execution using 'sudo' and execution without it chose to use 'sudo' +# +# Globals: +# None +# Arguments: +# None +# Returns: +# - export USE_SUDO +function os::util::environment::use_sudo() { + USE_SUDO=true + export USE_SUDO +} +readonly -f os::util::environment::use_sudo + +# os::util::environment::setup_time_vars sets up environment variables that describe durations of time +# These variables can be used to specify times for other utility functions +# +# Globals: +# None +# Arguments: +# None +# Returns: +# - export TIME_MS +# - export TIME_SEC +# - export TIME_MIN +function os::util::environment::setup_time_vars() { + TIME_MS=1 + export TIME_MS + TIME_SEC="$(( 1000 * TIME_MS ))" + export TIME_SEC + TIME_MIN="$(( 60 * TIME_SEC ))" + export TIME_MIN +} +readonly -f os::util::environment::setup_time_vars + +# os::util::environment::setup_all_server_vars sets up all environment variables necessary to configure and start an OpenShift server +# +# Globals: +# - OS_ROOT +# - PATH +# - TMPDIR +# - LOG_DIR +# - ARTIFACT_DIR +# - KUBELET_SCHEME +# - KUBELET_BIND_HOST +# - KUBELET_HOST +# - KUBELET_PORT +# - BASETMPDIR +# - ETCD_PORT +# - ETCD_PEER_PORT +# - API_BIND_HOST +# - API_HOST +# - API_PORT +# - API_SCHEME +# - PUBLIC_MASTER_HOST +# - USE_IMAGES +# Arguments: +# - 1: the path under the root temporary directory for OpenShift where these subdirectories should be made +# Returns: +# - export PATH +# - export BASETMPDIR +# - export LOG_DIR +# - export VOLUME_DIR +# - export ARTIFACT_DIR +# - export FAKE_HOME_DIR +# - export KUBELET_SCHEME +# - export KUBELET_BIND_HOST +# - export KUBELET_HOST +# - export KUBELET_PORT +# - export ETCD_PORT +# - export ETCD_PEER_PORT +# - export 
ETCD_DATA_DIR +# - export API_BIND_HOST +# - export API_HOST +# - export API_PORT +# - export API_SCHEME +# - export SERVER_CONFIG_DIR +# - export MASTER_CONFIG_DIR +# - export NODE_CONFIG_DIR +# - export USE_IMAGES +# - export TAG +function os::util::environment::setup_all_server_vars() { + os::util::environment::setup_kubelet_vars + os::util::environment::setup_etcd_vars + os::util::environment::setup_server_vars + os::util::environment::setup_images_vars +} +readonly -f os::util::environment::setup_all_server_vars + +# os::util::environment::update_path_var updates $PATH so that OpenShift binaries are available +# +# Globals: +# - OS_ROOT +# - PATH +# Arguments: +# None +# Returns: +# - export PATH +function os::util::environment::update_path_var() { + local prefix + if os::util::find::system_binary 'go' >/dev/null 2>&1; then + prefix+="${OS_OUTPUT_BINPATH}/$(os::build::host_platform):" + fi + if [[ -n "${GOPATH:-}" ]]; then + prefix+="${GOPATH}/bin:" + fi + + PATH="${prefix:-}${PATH}" + export PATH +} +readonly -f os::util::environment::update_path_var + +# os::util::environment::setup_tmpdir_vars sets up temporary directory path variables +# +# Globals: +# - TMPDIR +# Arguments: +# - 1: the path under the root temporary directory for OpenShift where these subdirectories should be made +# Returns: +# - export BASETMPDIR +# - export BASEOUTDIR +# - export LOG_DIR +# - export VOLUME_DIR +# - export ARTIFACT_DIR +# - export FAKE_HOME_DIR +# - export OS_TMP_ENV_SET +function os::util::environment::setup_tmpdir_vars() { + local sub_dir=$1 + + BASETMPDIR="${TMPDIR:-/tmp}/openshift/${sub_dir}" + export BASETMPDIR + VOLUME_DIR="${BASETMPDIR}/volumes" + export VOLUME_DIR + + BASEOUTDIR="${OS_OUTPUT_SCRIPTPATH}/${sub_dir}" + export BASEOUTDIR + LOG_DIR="${ARTIFACT_DIR:-${BASEOUTDIR}}/logs" + export LOG_DIR + ARTIFACT_DIR="${ARTIFACT_DIR:-${BASEOUTDIR}/artifacts}" + export ARTIFACT_DIR + FAKE_HOME_DIR="${BASEOUTDIR}/openshift.local.home" + export FAKE_HOME_DIR + + mkdir -p "${LOG_DIR}" "${VOLUME_DIR}" "${ARTIFACT_DIR}" "${FAKE_HOME_DIR}" + + export OS_TMP_ENV_SET="${sub_dir}" +} +readonly -f os::util::environment::setup_tmpdir_vars + +# os::util::environment::setup_kubelet_vars sets up environment variables necessary for interacting with the kubelet +# +# Globals: +# - KUBELET_SCHEME +# - KUBELET_BIND_HOST +# - KUBELET_HOST +# - KUBELET_PORT +# Arguments: +# None +# Returns: +# - export KUBELET_SCHEME +# - export KUBELET_BIND_HOST +# - export KUBELET_HOST +# - export KUBELET_PORT +function os::util::environment::setup_kubelet_vars() { + KUBELET_SCHEME="${KUBELET_SCHEME:-https}" + export KUBELET_SCHEME + KUBELET_BIND_HOST="${KUBELET_BIND_HOST:-127.0.0.1}" + export KUBELET_BIND_HOST + KUBELET_HOST="${KUBELET_HOST:-${KUBELET_BIND_HOST}}" + export KUBELET_HOST + KUBELET_PORT="${KUBELET_PORT:-10250}" + export KUBELET_PORT +} +readonly -f os::util::environment::setup_kubelet_vars + +# os::util::environment::setup_etcd_vars sets up environment variables necessary for interacting with etcd +# +# Globals: +# - BASETMPDIR +# - ETCD_HOST +# - ETCD_PORT +# - ETCD_PEER_PORT +# Arguments: +# None +# Returns: +# - export ETCD_HOST +# - export ETCD_PORT +# - export ETCD_PEER_PORT +# - export ETCD_DATA_DIR +function os::util::environment::setup_etcd_vars() { + ETCD_HOST="${ETCD_HOST:-127.0.0.1}" + export ETCD_HOST + ETCD_PORT="${ETCD_PORT:-4001}" + export ETCD_PORT + ETCD_PEER_PORT="${ETCD_PEER_PORT:-7001}" + export ETCD_PEER_PORT + + ETCD_DATA_DIR="${BASETMPDIR}/etcd" + export ETCD_DATA_DIR + + mkdir -p 
"${ETCD_DATA_DIR}" +} +readonly -f os::util::environment::setup_etcd_vars + +# os::util::environment::setup_server_vars sets up environment variables necessary for interacting with the server +# +# Globals: +# - BASETMPDIR +# - KUBELET_HOST +# - API_BIND_HOST +# - API_HOST +# - API_PORT +# - API_SCHEME +# - PUBLIC_MASTER_HOST +# Arguments: +# None +# Returns: +# - export API_BIND_HOST +# - export API_HOST +# - export API_PORT +# - export API_SCHEME +# - export SERVER_CONFIG_DIR +# - export MASTER_CONFIG_DIR +# - export NODE_CONFIG_DIR +function os::util::environment::setup_server_vars() { + # turn on cache mutation detector every time we start a server + KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}" + export KUBE_CACHE_MUTATION_DETECTOR + + API_BIND_HOST="${API_BIND_HOST:-127.0.0.1}" + export API_BIND_HOST + API_HOST="${API_HOST:-${API_BIND_HOST}}" + export API_HOST + API_PORT="${API_PORT:-8443}" + export API_PORT + API_SCHEME="${API_SCHEME:-https}" + export API_SCHEME + + MASTER_ADDR="${API_SCHEME}://${API_HOST}:${API_PORT}" + export MASTER_ADDR + PUBLIC_MASTER_HOST="${PUBLIC_MASTER_HOST:-${API_HOST}}" + export PUBLIC_MASTER_HOST + + SERVER_CONFIG_DIR="${BASETMPDIR}/openshift.local.config" + export SERVER_CONFIG_DIR + MASTER_CONFIG_DIR="${SERVER_CONFIG_DIR}/master" + export MASTER_CONFIG_DIR + NODE_CONFIG_DIR="${SERVER_CONFIG_DIR}/node-${KUBELET_HOST}" + export NODE_CONFIG_DIR + + ETCD_CLIENT_CERT="${MASTER_CONFIG_DIR}/master.etcd-client.crt" + export ETCD_CLIENT_CERT + ETCD_CLIENT_KEY="${MASTER_CONFIG_DIR}/master.etcd-client.key" + export ETCD_CLIENT_KEY + ETCD_CA_BUNDLE="${MASTER_CONFIG_DIR}/ca-bundle.crt" + export ETCD_CA_BUNDLE + + mkdir -p "${SERVER_CONFIG_DIR}" "${MASTER_CONFIG_DIR}" "${NODE_CONFIG_DIR}" +} +readonly -f os::util::environment::setup_server_vars + +# os::util::environment::setup_images_vars sets up environment variables necessary for interacting with release images +# +# Globals: +# - OS_ROOT +# - USE_IMAGES +# Arguments: +# None +# Returns: +# - export USE_IMAGES +# - export TAG +# - export MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY +function os::util::environment::setup_images_vars() { + # Use either the latest release built images, or latest. + IMAGE_PREFIX="${OS_IMAGE_PREFIX:-"openshift/origin"}" + if [[ -z "${USE_IMAGES-}" ]]; then + TAG='latest' + export TAG + USE_IMAGES="${IMAGE_PREFIX}-\${component}:latest" + export USE_IMAGES + + if [[ -e "${OS_ROOT}/_output/local/releases/.commit" ]]; then + TAG="$(cat "${OS_ROOT}/_output/local/releases/.commit")" + export TAG + USE_IMAGES="${IMAGE_PREFIX}-\${component}:${TAG}" + export USE_IMAGES + fi + fi + export MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY="${MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY:-3}" +} +readonly -f os::util::environment::setup_images_vars diff --git a/openshift-hack/lib/util/find.sh b/openshift-hack/lib/util/find.sh new file mode 100644 index 0000000000000..4ca12d040f9b3 --- /dev/null +++ b/openshift-hack/lib/util/find.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash + +# This script contains helper functions for finding components +# in the Origin repository or on the host machine running scripts. + +# os::util::find::system_binary determines the absolute path to a +# system binary, if it exists. 
+# +# Globals: +# None +# Arguments: +# - 1: binary name +# Returns: +# - location of the binary +function os::util::find::system_binary() { + local binary_name="$1" + + command -v "${binary_name}" +} +readonly -f os::util::find::system_binary + +# os::util::find::built_binary determines the absolute path to a +# built binary for the current platform, if it exists. +# +# Globals: +# - OS_OUTPUT_BINPATH +# Arguments: +# - 1: binary name +# Returns: +# - location of the binary +function os::util::find::built_binary() { + local binary_name="$1" + + local binary_path; binary_path="${OS_OUTPUT_BINPATH}/$( os::build::host_platform )/${binary_name}" + # we need to check that the path leads to a file + # as directories also have the executable bit set + if [[ -f "${binary_path}" && -x "${binary_path}" ]]; then + echo "${binary_path}" + return 0 + else + return 1 + fi +} +readonly -f os::util::find::built_binary + +# os::util::find::gopath_binary determines the absolute path to a +# binary installed through the go toolchain, if it exists. +# +# Globals: +# - GOPATH +# Arguments: +# - 1: binary name +# Returns: +# - location of the binary +function os::util::find::gopath_binary() { + local binary_name="$1" + + local old_ifs="${IFS}" + IFS=":" + for part in ${GOPATH}; do + local binary_path="${part}/bin/${binary_name}" + # we need to check that the path leads to a file + # as directories also have the executable bit set + if [[ -f "${binary_path}" && -x "${binary_path}" ]]; then + echo "${binary_path}" + IFS="${old_ifs}" + return 0 + fi + done + IFS="${old_ifs}" + return 1 +} +readonly -f os::util::find::gopath_binary \ No newline at end of file diff --git a/openshift-hack/lib/util/misc.sh b/openshift-hack/lib/util/misc.sh new file mode 100644 index 0000000000000..69ea27dc43e2a --- /dev/null +++ b/openshift-hack/lib/util/misc.sh @@ -0,0 +1,224 @@ +#!/usr/bin/env bash +# +# This library holds miscellaneous utility functions. If there begin to be groups of functions in this +# file that share intent or are thematically similar, they should be split into their own files. + +# os::util::describe_return_code describes an exit code +# +# Globals: +# - OS_SCRIPT_START_TIME +# Arguments: +# - 1: exit code to describe +# Returns: +# None +function os::util::describe_return_code() { + local return_code=$1 + local message + message="$( os::util::repository_relative_path "$0" ) exited with code ${return_code} " + + if [[ -n "${OS_SCRIPT_START_TIME:-}" ]]; then + local end_time + end_time="$(date +%s)" + local elapsed_time + elapsed_time="$(( end_time - OS_SCRIPT_START_TIME ))" + local formatted_time + formatted_time="$( os::util::format_seconds "${elapsed_time}" )" + message+="after ${formatted_time}" + fi + + if [[ "${return_code}" = "0" ]]; then + os::log::info "${message}" + else + os::log::error "${message}" + fi +} +readonly -f os::util::describe_return_code + +# os::util::install_describe_return_code installs the return code describer for the EXIT trap +# If the EXIT trap is not initialized, installing this plugin will initialize it. +# +# Globals: +# None +# Arguments: +# None +# Returns: +# - export OS_DESCRIBE_RETURN_CODE +# - export OS_SCRIPT_START_TIME +function os::util::install_describe_return_code() { + export OS_DESCRIBE_RETURN_CODE="true" + OS_SCRIPT_START_TIME="$( date +%s )"; export OS_SCRIPT_START_TIME + os::util::trap::init_exit +} +readonly -f os::util::install_describe_return_code + +# OS_ORIGINAL_WD is the original working directory the script sourcing this utility file was called +# from. 
This is an important directory as if $0 is a relative path, we cannot use the following path +# utility without knowing from where $0 is relative. +if [[ -z "${OS_ORIGINAL_WD:-}" ]]; then + # since this could be sourced in a context where the utilities are already loaded, + # we want to ensure that this is re-entrant, so we only set $OS_ORIGINAL_WD if it + # is not set already + OS_ORIGINAL_WD="$( pwd )" + readonly OS_ORIGINAL_WD + export OS_ORIGINAL_WD +fi + +# os::util::repository_relative_path returns the relative path from the $OS_ROOT directory to the +# given file, if the file is inside of the $OS_ROOT directory. If the file is outside of $OS_ROOT, +# this function will return the absolute path to the file +# +# Globals: +# - OS_ROOT +# Arguments: +# - 1: the path to relativize +# Returns: +# None +function os::util::repository_relative_path() { + local filename=$1 + local directory; directory="$( dirname "${filename}" )" + filename="$( basename "${filename}" )" + + if [[ "${directory}" != "${OS_ROOT}"* ]]; then + pushd "${OS_ORIGINAL_WD}" >/dev/null 2>&1 || exit 1 + directory="$( os::util::absolute_path "${directory}" )" + popd >/dev/null 2>&1 || exit 1 + fi + + directory="${directory##*${OS_ROOT}/}" + + echo "${directory}/${filename}" +} +readonly -f os::util::repository_relative_path + +# os::util::format_seconds formats a duration of time in seconds to print in HHh MMm SSs +# +# Globals: +# None +# Arguments: +# - 1: time in seconds to format +# Return: +# None +function os::util::format_seconds() { + local raw_seconds=$1 + + local hours minutes seconds + (( hours=raw_seconds/3600 )) + (( minutes=(raw_seconds%3600)/60 )) + (( seconds=raw_seconds%60 )) + + printf '%02dh %02dm %02ds' "${hours}" "${minutes}" "${seconds}" +} +readonly -f os::util::format_seconds + +# os::util::sed attempts to make our Bash scripts agnostic to the platform +# on which they run `sed` by glossing over a discrepancy in flag use in GNU. +# +# Globals: +# None +# Arguments: +# - all: arguments to pass to `sed -i` +# Return: +# None +function os::util::sed() { + local sudo="${USE_SUDO:+sudo}" + if LANG=C sed --help 2>&1 | grep -q "GNU sed"; then + ${sudo} sed -i'' "$@" + else + ${sudo} sed -i '' "$@" + fi +} +readonly -f os::util::sed + +# os::util::base64decode attempts to make our Bash scripts agnostic to the platform +# on which they run `base64decode` by glossing over a discrepancy in flag use in GNU. +# +# Globals: +# None +# Arguments: +# - all: arguments to pass to `base64decode` +# Return: +# None +function os::util::base64decode() { + if [[ "$(go env GOHOSTOS)" == "darwin" ]]; then + base64 -D "$@" + else + base64 -d "$@" + fi +} +readonly -f os::util::base64decode + +# os::util::curl_etcd sends a request to the backing etcd store for the master. +# We use the administrative client cert and key for access and re-encode them +# as necessary for OSX clients. 
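+# An illustrative call (the key path is an example only):
+#   os::util::curl_etcd "/v2/keys/kubernetes.io"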
+#
+# Globals:
+#  MASTER_CONFIG_DIR
+#  API_SCHEME
+#  API_HOST
+#  ETCD_PORT
+# Arguments:
+#  - 1: etcd-relative URL to curl, with leading slash
+# Returns:
+#  None
+function os::util::curl_etcd() {
+  local url="$1"
+  local full_url="${API_SCHEME}://${API_HOST}:${ETCD_PORT}${url}"
+
+  local etcd_client_cert="${MASTER_CONFIG_DIR}/master.etcd-client.crt"
+  local etcd_client_key="${MASTER_CONFIG_DIR}/master.etcd-client.key"
+  local ca_bundle="${MASTER_CONFIG_DIR}/ca-bundle.crt"
+
+  if curl -V | grep -q 'SecureTransport'; then
+    # on newer OSX `curl` implementations, OpenSSL is not used and client certs
+    # and keys are expected to be encoded in P12 format instead of PEM format,
+    # so we need to convert the secrets that the server wrote if we haven't
+    # already done so
+    local etcd_client_cert_p12="${MASTER_CONFIG_DIR}/master.etcd-client.crt.p12"
+    local etcd_client_cert_p12_password="${CURL_CERT_P12_PASSWORD:-password}"
+    if [[ ! -f "${etcd_client_cert_p12}" ]]; then
+      openssl pkcs12 -export \
+        -in "${etcd_client_cert}" \
+        -inkey "${etcd_client_key}" \
+        -out "${etcd_client_cert_p12}" \
+        -password "pass:${etcd_client_cert_p12_password}"
+    fi
+
+    curl --fail --silent --cacert "${ca_bundle}" \
+      --cert "${etcd_client_cert_p12}:${etcd_client_cert_p12_password}" "${full_url}"
+  else
+    curl --fail --silent --cacert "${ca_bundle}" \
+      --cert "${etcd_client_cert}" --key "${etcd_client_key}" "${full_url}"
+  fi
+}
+
+# os::util::ensure_tmpfs ensures that the target dir is mounted on tmpfs
+#
+# Globals:
+#  OS_TMPFS_REQUIRED
+# Arguments:
+#  - 1: target to check
+# Returns:
+#  None
+function os::util::ensure_tmpfs() {
+  if [[ -z "${OS_TMPFS_REQUIRED:-}" ]]; then
+    return 0
+  fi
+
+  local target="$1"
+  if [[ ! -d "${target}" ]]; then
+    os::log::fatal "Target dir ${target} does not exist, cannot perform fstype check."
+  fi
+
+  os::log::debug "Filesystem information:
+$( df -h -T )"
+
+  os::log::debug "Mount information:
+$( findmnt --all )"
+
+  local fstype
+  fstype="$( df --output=fstype "${target}" | tail -n 1 )"
+  if [[ "${fstype}" != "tmpfs" ]]; then
+    local message="Expected \`${target}\` to be mounted on \`tmpfs\` but found \`${fstype}\` instead."
+    os::log::fatal "${message}"
+  fi
+}
diff --git a/openshift-hack/lib/util/text.sh b/openshift-hack/lib/util/text.sh
new file mode 100644
index 0000000000000..708a47251cb20
--- /dev/null
+++ b/openshift-hack/lib/util/text.sh
@@ -0,0 +1,164 @@
+#!/usr/bin/env bash
+
+# This file contains helpful aliases for manipulating the output text to the terminal as
+# well as functions for one-command augmented printing.
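+#
+# These helpers are meant to be composed by callers, e.g. (illustrative):
+#   os::text::print_red_bold "no such binary" 1>&2
+#   os::text::print_green "all checks passed"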
+ +# os::text::reset resets the terminal output to default if it is called in a TTY +function os::text::reset() { + if os::text::internal::is_tty; then + tput sgr0 + fi +} +readonly -f os::text::reset + +# os::text::bold sets the terminal output to bold text if it is called in a TTY +function os::text::bold() { + if os::text::internal::is_tty; then + tput bold + fi +} +readonly -f os::text::bold + +# os::text::red sets the terminal output to red text if it is called in a TTY +function os::text::red() { + if os::text::internal::is_tty; then + tput setaf 1 + fi +} +readonly -f os::text::red + +# os::text::green sets the terminal output to green text if it is called in a TTY +function os::text::green() { + if os::text::internal::is_tty; then + tput setaf 2 + fi +} +readonly -f os::text::green + +# os::text::blue sets the terminal output to blue text if it is called in a TTY +function os::text::blue() { + if os::text::internal::is_tty; then + tput setaf 4 + fi +} +readonly -f os::text::blue + +# os::text::yellow sets the terminal output to yellow text if it is called in a TTY +function os::text::yellow() { + if os::text::internal::is_tty; then + tput setaf 11 + fi +} +readonly -f os::text::yellow + +# os::text::clear_last_line clears the text from the last line of output to the +# terminal and leaves the cursor on that line to allow for overwriting that text +# if it is called in a TTY +function os::text::clear_last_line() { + if os::text::internal::is_tty; then + tput cuu 1 + tput el + fi +} +readonly -f os::text::clear_last_line + +# os::text::clear_string attempts to clear the entirety of a string from the terminal. +# If the string contains literal tabs or other characters that take up more than one +# character space in output, or if the window size is changed before this function +# is called, it will not function correctly. 
+# No action is taken if this is called outside of a TTY +function os::text::clear_string() { + local -r string="$1" + if os::text::internal::is_tty; then + echo "${string}" | while read -r line; do + # num_lines is the number of terminal lines this one line of output + # would have taken up with the current terminal width in columns + local num_lines=$(( ${#line} / $( tput cols ) )) + for (( i = 0; i <= num_lines; i++ )); do + os::text::clear_last_line + done + done + fi +} + +# os::text::internal::is_tty determines if we are outputting to a TTY +function os::text::internal::is_tty() { + [[ -t 1 && -n "${TERM:-}" ]] +} +readonly -f os::text::internal::is_tty + +# os::text::print_bold prints all input in bold text +function os::text::print_bold() { + os::text::bold + echo "${*}" + os::text::reset +} +readonly -f os::text::print_bold + +# os::text::print_red prints all input in red text +function os::text::print_red() { + os::text::red + echo "${*}" + os::text::reset +} +readonly -f os::text::print_red + +# os::text::print_red_bold prints all input in bold red text +function os::text::print_red_bold() { + os::text::red + os::text::bold + echo "${*}" + os::text::reset +} +readonly -f os::text::print_red_bold + +# os::text::print_green prints all input in green text +function os::text::print_green() { + os::text::green + echo "${*}" + os::text::reset +} +readonly -f os::text::print_green + +# os::text::print_green_bold prints all input in bold green text +function os::text::print_green_bold() { + os::text::green + os::text::bold + echo "${*}" + os::text::reset +} +readonly -f os::text::print_green_bold + +# os::text::print_blue prints all input in blue text +function os::text::print_blue() { + os::text::blue + echo "${*}" + os::text::reset +} +readonly -f os::text::print_blue + +# os::text::print_blue_bold prints all input in bold blue text +function os::text::print_blue_bold() { + os::text::blue + os::text::bold + echo "${*}" + os::text::reset +} +readonly -f os::text::print_blue_bold + +# os::text::print_yellow prints all input in yellow text +function os::text::print_yellow() { + os::text::yellow + echo "${*}" + os::text::reset +} +readonly -f os::text::print_yellow + +# os::text::print_yellow_bold prints all input in bold yellow text +function os::text::print_yellow_bold() { + os::text::yellow + os::text::bold + echo "${*}" + os::text::reset +} +readonly -f os::text::print_yellow_bold diff --git a/openshift-hack/lib/util/trap.sh b/openshift-hack/lib/util/trap.sh new file mode 100644 index 0000000000000..f76d6bfe404d5 --- /dev/null +++ b/openshift-hack/lib/util/trap.sh @@ -0,0 +1,99 @@ +#!/usr/bin/env bash +# +# This library defines the trap handlers for the ERR and EXIT signals. Any new handler for these signals +# must be added to these handlers and activated by the environment variable mechanism that the rest use. +# These functions ensure that no handler can ever alter the exit code that was emitted by a command +# in a test script. + +# os::util::trap::init_err initializes the privileged handler for the ERR signal if it hasn't +# been registered already. This will overwrite any other handlers registered on the signal. +# +# Globals: +# None +# Arguments: +# None +# Returns: +# None +function os::util::trap::init_err() { + if ! 
trap -p ERR | grep -q 'os::util::trap::err_handler'; then + trap 'os::util::trap::err_handler;' ERR + fi +} +readonly -f os::util::trap::init_err + +# os::util::trap::init_exit initializes the privileged handler for the EXIT signal if it hasn't +# been registered already. This will overwrite any other handlers registered on the signal. +# +# Globals: +# None +# Arguments: +# None +# Returns: +# None +function os::util::trap::init_exit() { + if ! trap -p EXIT | grep -q 'os::util::trap::exit_handler'; then + trap 'os::util::trap::exit_handler;' EXIT + fi +} +readonly -f os::util::trap::init_exit + +# os::util::trap::err_handler is the handler for the ERR signal. +# +# Globals: +# - OS_TRAP_DEBUG +# - OS_USE_STACKTRACE +# Arguments: +# None +# Returns: +# - returns original return code, allows privileged handler to exit if necessary +function os::util::trap::err_handler() { + local -r return_code=$? + local -r last_command="${BASH_COMMAND}" + + if set +o | grep -q '\-o errexit'; then + local -r errexit_set=true + fi + + if [[ "${OS_TRAP_DEBUG:-}" = "true" ]]; then + echo "[DEBUG] Error handler executing with return code \`${return_code}\`, last command \`${last_command}\`, and errexit set \`${errexit_set:-}\`" + fi + + if [[ "${OS_USE_STACKTRACE:-}" = "true" ]]; then + # the OpenShift stacktrace function is treated as a privileged handler for this signal + # and is therefore allowed to run outside of a subshell in order to allow it to `exit` + # if necessary + os::log::stacktrace::print "${return_code}" "${last_command}" "${errexit_set:-}" + fi + + return "${return_code}" +} +readonly -f os::util::trap::err_handler + +# os::util::trap::exit_handler is the handler for the EXIT signal. +# +# Globals: +# - OS_TRAP_DEBUG +# - OS_DESCRIBE_RETURN_CODE +# Arguments: +# None +# Returns: +# - original exit code of the script that exited +function os::util::trap::exit_handler() { + local -r return_code=$? + + # we do not want these traps to be able to trigger more errors, we can let them fail silently + set +o errexit + + if [[ "${OS_TRAP_DEBUG:-}" = "true" ]]; then + echo "[DEBUG] Exit handler executing with return code \`${return_code}\`" + fi + + # the following envars selectively enable optional exit traps, all of which are run inside of + # a subshell in order to sandbox them and not allow them to influence how this script will exit + if [[ "${OS_DESCRIBE_RETURN_CODE:-}" = "true" ]]; then + ( os::util::describe_return_code "${return_code}" ) + fi + + exit "${return_code}" +} +readonly -f os::util::trap::exit_handler diff --git a/openshift-hack/rebase.sh b/openshift-hack/rebase.sh new file mode 100755 index 0000000000000..70ea50b38baa9 --- /dev/null +++ b/openshift-hack/rebase.sh @@ -0,0 +1,175 @@ +#!/bin/bash + +# READ FIRST BEFORE USING THIS SCRIPT +# +# This script requires jq, git, podman and bash to work properly (dependencies are checked for you). +# The Github CLI "gh" is optional, but convenient to create a pull request automatically at the end. +# +# This script generates a git remote structure described in: +# https://github.com/openshift/kubernetes/blob/master/REBASE.openshift.md#preparing-the-local-repo-clone +# Please check if you have configured the correct remotes, otherwise the script will fail. +# +# The usage is described in /Rebase.openshift.md. 
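+#
+# An illustrative invocation (values are examples only):
+#   ./openshift-hack/rebase.sh --k8s-tag=v1.21.2 --openshift-release=release-4.8 --bugzilla-id=2003027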
+
+# validate input args --k8s-tag=v1.21.2 --openshift-release=release-4.8 --bugzilla-id=2003027
+k8s_tag=""
+openshift_release=""
+bugzilla_id=""
+
+usage() {
+  echo "Available arguments:"
+  echo " --k8s-tag (required) Example: --k8s-tag=v1.21.2"
+  echo " --openshift-release (required) Example: --openshift-release=release-4.8"
+  echo " --bugzilla-id (optional) creates a new PR against openshift/kubernetes:\${openshift_release}. Example: --bugzilla-id=2003027"
+}
+
+for i in "$@"; do
+  case $i in
+  --k8s-tag=*)
+    k8s_tag="${i#*=}"
+    shift
+    ;;
+  --openshift-release=*)
+    openshift_release="${i#*=}"
+    shift
+    ;;
+  --bugzilla-id=*)
+    bugzilla_id="${i#*=}"
+    shift
+    ;;
+  *)
+    usage
+    exit 1
+    ;;
+  esac
+done
+
+if [ -z "${k8s_tag}" ]; then
+  echo "Required argument missing: --k8s-tag"
+  echo ""
+  usage
+  exit 1
+fi
+
+if [ -z "${openshift_release}" ]; then
+  echo "Required argument missing: --openshift-release"
+  echo ""
+  usage
+  exit 1
+fi
+
+echo "Processed arguments are:"
+echo "--k8s_tag=${k8s_tag}"
+echo "--openshift_release=${openshift_release}"
+echo "--bugzilla_id=${bugzilla_id}"
+
+# prerequisites (check git, podman, ... is present)
+if ! command -v git &>/dev/null; then
+  echo "git not installed, exiting"
+  exit 1
+fi
+
+if ! command -v jq &>/dev/null; then
+  echo "jq not installed, exiting"
+  exit 1
+fi
+
+if ! command -v podman &>/dev/null; then
+  echo "podman not installed, exiting"
+  exit 1
+fi
+
+# make sure we're in "kubernetes" dir
+if [[ $(basename "$PWD") != "kubernetes" ]]; then
+  echo "Not in kubernetes dir, exiting"
+  exit 1
+fi
+
+origin=$(git remote get-url origin)
+if [[ "$origin" =~ .*kubernetes/kubernetes.* || "$origin" =~ .*openshift/kubernetes.* ]]; then
+  echo "cannot rebase against k/k or o/k! found: ${origin}, exiting"
+  exit 1
+fi
+
+# fetch remote https://github.com/kubernetes/kubernetes
+git remote add upstream git@github.com:kubernetes/kubernetes.git
+git fetch upstream --tags -f
+# fetch remote https://github.com/openshift/kubernetes
+git remote add openshift git@github.com:openshift/kubernetes.git
+git fetch openshift
+
+#git checkout --track "openshift/$openshift_release"
+git pull openshift "$openshift_release"
+
+git merge "$k8s_tag"
+# shellcheck disable=SC2181
+if [ $? -eq 0 ]; then
+  echo "No conflicts detected. Automatic merge looks to have succeeded"
+else
+  # commit conflicts
+  git commit -a
+  # resolve conflicts
+  git status
+  # TODO(tjungblu): we follow-up with a more automated approach:
+  # - 2/3s of conflicts stem from go.mod/sum, which can be resolved deterministically
+  # - the large majority of the remainder are vendor/generation conflicts
+  # - only very few cases require manual intervention due to conflicting business logic
+  echo "Resolve conflicts manually in another terminal, only then continue"
+
+  # wait for user interaction
+  read -n 1 -s -r -p "PRESS ANY KEY TO CONTINUE"
+
+  # TODO(tjungblu): verify that the conflicts have been resolved
+  git commit -am "UPSTREAM: <carry>: manually resolve conflicts"
+fi
+
+# openshift-hack/images/hyperkube/Dockerfile.rhel still has FROM pointing to old tag
+# we need to remove the prefix "v" from the $k8s_tag to stay compatible
+sed -i -E "s/(io.openshift.build.versions=\"kubernetes=)(1.[1-9]+.[1-9]+)/\1${k8s_tag:1}/" openshift-hack/images/hyperkube/Dockerfile.rhel
+go_mod_go_ver=$(grep -E 'go 1\.[1-9][0-9]?' go.mod | sed -E 's/go (1\.[1-9][0-9]?)/\1/')
+tag="rhel-8-release-golang-${go_mod_go_ver}-openshift-${openshift_release#release-}"
+
+# update openshift go.mod dependencies
+sed -i -E "/=>/! s/(\tgithub.com\/openshift\/[a-z|-]+) (.*)$/\1 $openshift_release/" go.mod
+
+echo "> go mod tidy && hack/update-vendor.sh"
+# run both steps inside the container so they share the same toolchain
+podman run -it --rm -v "$(pwd):/go/k8s.io/kubernetes:Z" \
+  --workdir=/go/k8s.io/kubernetes \
+  "registry.ci.openshift.org/openshift/release:$tag" \
+  /bin/bash -c 'go mod tidy && hack/update-vendor.sh'
+
+# shellcheck disable=SC2181
+if [ $? -ne 0 ]; then
+  echo "updating the vendor folder failed, is any dependency missing?"
+  exit 1
+fi
+
+podman run -it --rm -v "$(pwd):/go/k8s.io/kubernetes:Z" \
+  --workdir=/go/k8s.io/kubernetes \
+  "registry.ci.openshift.org/openshift/release:$tag" \
+  make update OS_RUN_WITHOUT_DOCKER=yes
+
+git add -A
+git commit -m "UPSTREAM: <drop>: hack/update-vendor.sh, make update and update image"
+
+remote_branch="rebase-$k8s_tag"
+git push origin "$openshift_release:$remote_branch"
+
+if [ -n "${bugzilla_id}" ]; then
+  if command -v gh &>/dev/null; then
+    XY=$(echo "$k8s_tag" | sed -E "s/v(1\.[0-9]+)\.[0-9]+/\1/")
+    ver=$(echo "$k8s_tag" | sed "s/\.//g")
+    link="https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-$XY.md#$ver"
+
+    # opens a web browser, because we can't properly create PRs against remote repositories with the GH CLI (yet):
+    # https://github.com/cli/cli/issues/2691
+    gh pr create \
+      --title "Bug $bugzilla_id: Rebase $k8s_tag" \
+      --body "CHANGELOG $link" \
+      --web
+
+  fi
+fi
diff --git a/openshift-hack/sysctls/50-kubelet.conf b/openshift-hack/sysctls/50-kubelet.conf
new file mode 100644
index 0000000000000..3a4d5a7b1af63
--- /dev/null
+++ b/openshift-hack/sysctls/50-kubelet.conf
@@ -0,0 +1,6 @@
+kernel.keys.root_maxbytes=25000000
+kernel.keys.root_maxkeys=1000000
+kernel.panic=10
+kernel.panic_on_oops=1
+vm.overcommit_memory=1
+vm.panic_on_oom=0
diff --git a/openshift-hack/test-go.sh b/openshift-hack/test-go.sh
new file mode 100755
index 0000000000000..30793e2b082df
--- /dev/null
+++ b/openshift-hack/test-go.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+# shellcheck source=openshift-hack/lib/init.sh
+source "$(dirname "${BASH_SOURCE[0]}")/lib/init.sh"
+
+ARTIFACTS="${ARTIFACTS:-/tmp/artifacts}"
+mkdir -p "${ARTIFACTS}"
+
+export KUBERNETES_SERVICE_HOST=
+export KUBE_JUNIT_REPORT_DIR="${ARTIFACTS}"
+export KUBE_KEEP_VERBOSE_TEST_OUTPUT=y
+export KUBE_RACE=-race
+export KUBE_TEST_ARGS='-p 8'
+export KUBE_TIMEOUT='--timeout=360s'
+
+make test
diff --git a/openshift-hack/test-integration.sh b/openshift-hack/test-integration.sh
new file mode 100755
index 0000000000000..93c3ea902b099
--- /dev/null
+++ b/openshift-hack/test-integration.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+# shellcheck source=openshift-hack/lib/init.sh
+source "$(dirname "${BASH_SOURCE[0]}")/lib/init.sh"
+
+./hack/install-etcd.sh
+PATH="${OS_ROOT}/third_party/etcd:${PATH}"
+
+ARTIFACTS="${ARTIFACTS:-/tmp/artifacts}"
+mkdir -p "${ARTIFACTS}"
+
+export KUBERNETES_SERVICE_HOST=
+export KUBE_JUNIT_REPORT_DIR="${ARTIFACTS}"
+export KUBE_KEEP_VERBOSE_TEST_OUTPUT=y
+export KUBE_RACE=-race
+export KUBE_TEST_ARGS='-p 8'
+export LOG_LEVEL=4
+export PATH
+
+make test-integration
diff --git a/openshift-hack/test-kubernetes-e2e.sh b/openshift-hack/test-kubernetes-e2e.sh
new file mode 100755
index 0000000000000..ea005aee55aef
--- /dev/null
+++ b/openshift-hack/test-kubernetes-e2e.sh
@@ -0,0 +1,88 @@
+#!/bin/bash
+
+set -o nounset
+set -o errexit
+set -o pipefail
+
+# This script executes kubernetes e2e tests against an openshift
diff --git a/openshift-hack/test-kubernetes-e2e.sh b/openshift-hack/test-kubernetes-e2e.sh new file mode 100755 index 0000000000000..ea005aee55aef --- /dev/null +++ b/openshift-hack/test-kubernetes-e2e.sh @@ -0,0 +1,88 @@ +#!/bin/bash + +set -o nounset +set -o errexit +set -o pipefail + +# This script executes kubernetes e2e tests against an openshift +# cluster. It is intended to be copied to the kubernetes-tests image +# for use in CI and should have no dependencies beyond oc, kubectl and +# k8s-e2e.test. + +# Identify the platform under test to allow skipping tests that are +# not compatible. +CLUSTER_TYPE="${CLUSTER_TYPE:-gcp}" +case "${CLUSTER_TYPE}" in + gcp) + # gce is used as a platform label instead of gcp + PLATFORM=gce + ;; + *) + PLATFORM="${CLUSTER_TYPE}" + ;; +esac + +# openshift-tests will check the cluster's network configuration and +# automatically skip any incompatible tests. We have to do that manually +# here. +NETWORK_SKIPS="\[Skipped:Network/OVNKubernetes\]|\[Feature:Networking-IPv6\]|\[Feature:IPv6DualStack.*\]|\[Feature:SCTPConnectivity\]" + +# Support serial and parallel test suites +TEST_SUITE="${TEST_SUITE:-parallel}" +COMMON_SKIPS="\[Slow\]|\[Disruptive\]|\[Flaky\]|\[Disabled:.+\]|\[Skipped:${PLATFORM}\]|${NETWORK_SKIPS}" +case "${TEST_SUITE}" in +serial) + DEFAULT_TEST_ARGS="-focus=\[Serial\] -skip=${COMMON_SKIPS}" + NODES=1 + ;; +parallel) + DEFAULT_TEST_ARGS="-skip=\[Serial\]|${COMMON_SKIPS}" + # Use the same number of nodes - 30 - as specified for the parallel + # suite defined in origin. + NODES=${NODES:-30} + ;; +*) + echo >&2 "Unsupported test suite '${TEST_SUITE}'" + exit 1 + ;; +esac + +# Set KUBE_E2E_TEST_ARGS to configure test arguments like +# -skip and -focus. +KUBE_E2E_TEST_ARGS="${KUBE_E2E_TEST_ARGS:-${DEFAULT_TEST_ARGS}}" + +# k8s-e2e.test and ginkgo are expected to be in the path in +# CI. Outside of CI, ensure k8s-e2e.test and ginkgo are built and +# available in PATH. +if ! which k8s-e2e.test &> /dev/null; then + make WHAT=vendor/github.com/onsi/ginkgo/v2/ginkgo + make WHAT=openshift-hack/e2e/k8s-e2e.test + ROOT_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")/.."; pwd -P)" + PATH="${ROOT_PATH}/_output/local/bin/$(go env GOHOSTOS)/$(go env GOARCH):${PATH}" + export PATH +fi + +# Execute OpenShift prerequisites +# Disable container security +oc adm policy add-scc-to-group privileged system:authenticated system:serviceaccounts +oc adm policy add-scc-to-group anyuid system:authenticated system:serviceaccounts +# Count master nodes so the e2e framework can tolerate them being not ready +unschedulable="$(oc get nodes -o name -l 'node-role.kubernetes.io/master' | wc -l)" + +test_report_dir="${ARTIFACTS:-/tmp/artifacts}" +mkdir -p "${test_report_dir}" + +# Retrieve the API server URL to enable kubectl testing +SERVER="$( kubectl config view | grep server | head -n 1 | awk '{print $2}' )" + +# shellcheck disable=SC2086 +ginkgo \ + --flake-attempts=3 \ + --timeout="24h" \ + --output-interceptor-mode=none \ + -nodes "${NODES}" -no-color ${KUBE_E2E_TEST_ARGS} \ + "$( which k8s-e2e.test )" -- \ + -report-dir "${test_report_dir}" \ + -host "${SERVER}" \ + -allowed-not-ready-nodes ${unschedulable} \ + 2>&1 | tee -a "${test_report_dir}/k8s-e2e.log"
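Aside: to make the suite selection in the script above concrete, here is a hedged sketch of the arguments each branch effectively hands to ginkgo (PLATFORM=gce assumed, skip regexes abbreviated with "..."):
  # TEST_SUITE=serial: one worker, only [Serial] tests, minus the common skips
  ginkgo -nodes 1 -focus='\[Serial\]' -skip='\[Slow\]|\[Disruptive\]|\[Flaky\]|...|\[Skipped:gce\]' ...
  # TEST_SUITE=parallel (default): 30 workers, everything except [Serial] and the common skips
  ginkgo -nodes 30 -skip='\[Serial\]|\[Slow\]|\[Disruptive\]|\[Flaky\]|...|\[Skipped:gce\]' ...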
+source "$KUBE_ROOT/hack/lib/init.sh" + +# Convert a path relative to $KUBE_ROOT to a real path +localpath() { + realpath "$KUBE_ROOT/$1" +} + +# Configuration for fetching this file, relative to this repository root +ENVFILE=openshift-hack/kubensenter.env + +# The source of the file, relative to the remote repository root +SOURCE=utils/kubensenter/kubensenter + +# The destination of the file, relative to this repository root +DESTINATION=openshift-hack/images/hyperkube/kubensenter + +usage() { + source_env + echo "Usage:" + echo " $0 [--to-latest]" + echo + echo "Updates the local copy of $DESTINATION as configured in $ENVFILE:" + echo " REPO: $REPO" + echo " COMMIT: $COMMIT" + echo + echo "Options:" + echo " --to-latest (or env UPDATE_TO_LATEST=1)" + echo " Update $ENVFILE to the latest commit or tag in $REPO configured by the TARGET entry" + echo " (currently \"$TARGET\"), and synchronize to the updated commit." + echo " - If TARGET resolves to a branch, pin to the latest commit hash from that branch" + echo " - If TARGET resolves to a tag, pin to the latest tag that matches that pattern" + echo " - TARGET may be a glob-like expression such as \"v1.1.*\" that would match any of the following:" + echo " v1.1.0 v1.1.3 v1.1.22-rc1" + exit 1 +} + +source_env() { + source "$(localpath "$ENVFILE")" + # Intentionally global scope: + REPO=${REPO:-"github.com/containers/kubensmnt"} + COMMIT=${COMMIT:-"main"} + TARGET=${TARGET:-"main"} +} + +edit_envfile() { + local envfile=$1 + local refname=$2 + + # Shell-quote refname in case it contains any shell-special characters + local newcommit=$(printf 'COMMIT=%q' "$refname") + if [[ $# -gt 2 ]]; then + shift 2 + # Add the comment suffix + newcommit="$newcommit # $*" + fi + + local patch + patch=$(printf "%q" "$newcommit") + # Note: Using ':' since it is not a valid tag character according to git-check-ref-format(1) + sed -i "s:^COMMIT=.*:$patch:" "$envfile" +} + +update_env() { + local repouri latest refhash reftype refname + source_env + repouri=https://$REPO.git + echo "Updating to latest $TARGET from $repouri" + + latest=$(git \ + -c "versionsort.suffix=-alpha" \ + -c "versionsort.suffix=-beta" \ + -c "versionsort.suffix=-rc" \ + ls-remote \ + --heads --tags \ + --sort='-version:refname' \ + "$repouri" "$TARGET" \ + | head -n 1) + if [[ -z $latest ]]; then + echo "ERROR: No matching ref found for $TARGET" + return 1 + fi + refhash=$(cut -f1 <<<"$latest") + reftype=$(cut -d/ -f2 <<<"$latest") + refname=$(cut -d/ -f3 <<<"$latest") + + if [[ $reftype == "tags" ]]; then + echo " Latest tag is $refname ($refhash)" + edit_envfile "$ENVFILE" "$refname" "($refhash)" + else + echo " Latest on branch $refname is $refhash" + edit_envfile "$ENVFILE" "$refhash" + fi +} + +do_fetch() { + source_env + local repohost reponame uri + repohost=$(cut -d/ -f1 <<<"$REPO") + reponame=${REPO#$repohost/} + case $repohost in + github.com) + uri=https://raw.githubusercontent.com/$reponame/$COMMIT/$SOURCE + ;; + *) + echo "No support for repositories hosted on $repohost" + return 2 + ;; + esac + + echo "Fetching $DESTINATION from $uri" + curl -fsLo "$(localpath "$DESTINATION")" "$uri" +} + +main() { + local to_latest=${UPDATE_TO_LATEST:-} + if [[ $# -gt 0 ]]; then + if [[ $1 == "--help" || $1 == "-h" ]]; then + usage + elif [[ $1 == "--to-latest" ]]; then + to_latest=1 + fi + fi + + if [[ $to_latest ]]; then + update_env + fi + + do_fetch +} + +# bash modulino +[[ "${BASH_SOURCE[0]}" == "$0" ]] && main "$@" diff --git a/openshift-hack/update-test-annotations.sh 
diff --git a/openshift-hack/update-test-annotations.sh b/openshift-hack/update-test-annotations.sh new file mode 100755 index 0000000000000..82aa9b36bbdd3 --- /dev/null +++ b/openshift-hack/update-test-annotations.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +source "${KUBE_ROOT}/hack/lib/init.sh" + +kube::golang::setup_env + +# Update e2e test annotations that indicate openshift compatibility +go generate -mod vendor ./openshift-hack/e2e
diff --git a/openshift-hack/verify-kubensenter.sh b/openshift-hack/verify-kubensenter.sh new file mode 100755 index 0000000000000..07093f09809e7 --- /dev/null +++ b/openshift-hack/verify-kubensenter.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +source "${KUBE_ROOT}/hack/lib/init.sh" + +# Update kubensenter and error if a change is detected +"${KUBE_ROOT}"/openshift-hack/update-kubensenter.sh +git diff --quiet "${KUBE_ROOT}/openshift-hack/images/hyperkube/kubensenter"
diff --git a/openshift-hack/verify-test-annotations.sh b/openshift-hack/verify-test-annotations.sh new file mode 100755 index 0000000000000..1b04bb0d60308 --- /dev/null +++ b/openshift-hack/verify-test-annotations.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +source "${KUBE_ROOT}/hack/lib/init.sh" + +# Make sure that all packages that define k8s tests are properly imported +EXCLUDE_PACKAGES="\ +k8s.io/kubernetes/test/e2e/framework,\ +k8s.io/kubernetes/test/e2e/framework/debug/init,\ +k8s.io/kubernetes/test/e2e/framework/metrics/init,\ +k8s.io/kubernetes/test/e2e/framework/node/init,\ +k8s.io/kubernetes/test/e2e/framework/testfiles,\ +k8s.io/kubernetes/test/e2e/storage/external,\ +k8s.io/kubernetes/test/e2e/testing-manifests,\ +k8s.io/kubernetes/test/e2e/windows" + +GO111MODULE=on go run ./openshift-hack/cmd/go-imports-diff \ + -exclude "$EXCLUDE_PACKAGES" \ + test/e2e/e2e_test.go \ + openshift-hack/e2e/include.go + +# Verify e2e test annotations that indicate openshift compatibility +"${KUBE_ROOT}"/openshift-hack/update-test-annotations.sh +git diff --quiet "${KUBE_ROOT}/openshift-hack/e2e/annotate/generated/"
diff --git a/openshift-hack/verify.sh b/openshift-hack/verify.sh new file mode 100755 index 0000000000000..9361e8f4faea9 --- /dev/null +++ b/openshift-hack/verify.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# shellcheck source=openshift-hack/lib/init.sh +source "$(dirname "${BASH_SOURCE[0]}")/lib/init.sh" + +# Required for openapi verification +PATH="$(pwd)/third_party/etcd:${PATH}" + +# Attempt to verify without docker if it is not available. +OS_RUN_WITHOUT_DOCKER= +if ! which docker &> /dev/null; then + os::log::warning "docker not available, attempting to run verify without it" + OS_RUN_WITHOUT_DOCKER=y + + # Without docker, shellcheck may need to be installed.
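+ # (Hedged note: os::deps::path_with_shellcheck is assumed to print a PATH + # value that already contains a shellcheck binary; the exact install + # location is an implementation detail of the sourced openshift-hack/lib + # helpers.)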
+ PATH="$( os::deps::path_with_shellcheck )" +fi +export OS_RUN_WITHOUT_DOCKER + +export PATH + +ARTIFACTS="${ARTIFACTS:-/tmp/artifacts}" +mkdir -p "${ARTIFACTS}" +export KUBE_JUNIT_REPORT_DIR="${ARTIFACTS}" + +make verify diff --git a/openshift.spec b/openshift.spec new file mode 100644 index 0000000000000..622f809dc9ca1 --- /dev/null +++ b/openshift.spec @@ -0,0 +1,180 @@ +#debuginfo not supported with Go +%global debug_package %{nil} +# modifying the Go binaries breaks the DWARF debugging +%global __os_install_post %{_rpmconfigdir}/brp-compress + +%global gopath %{_datadir}/gocode +%global import_path k8s.io/kubernetes + +%global golang_version 1.15 + +%{!?commit: +# DO NOT MODIFY: the value on the line below is sed-like replaced by openshift/doozer +%global commit 86b5e46426ba828f49195af21c56f7c6674b48f7 +} +%global shortcommit %(c=%{commit}; echo ${c:0:7}) +# DO NOT MODIFY: the value on the line below is sed-like replaced by openshift/doozer +%{!?os_git_vars: +%global os_git_vars OS_GIT_VERSION='' OS_GIT_COMMIT='' OS_GIT_MAJOR='' OS_GIT_MINOR='' OS_GIT_TREE_STATE='' +} + +%if 0%{?skip_build} +%global do_build 0 +%else +%global do_build 1 +%endif +%if 0%{?skip_prep} +%global do_prep 0 +%else +%global do_prep 1 +%endif +%if 0%{?skip_dist} +%global package_dist %{nil} +%else +%global package_dist %{dist} +%endif + +%{!?version: %global version 4.0.0} +%{!?release: %global release 1} + +Name: openshift +Version: %{version} +Release: %{release}%{package_dist} +Summary: Open Source Container Management by Red Hat +License: ASL 2.0 +URL: https://%{import_path} + +# If go_arches not defined fall through to implicit golang archs +%if 0%{?go_arches:1} +ExclusiveArch: %{go_arches} +%else +ExclusiveArch: x86_64 aarch64 ppc64le s390x +%endif + +# TODO(marun) tar archives are no longer published for 4.x. Should this value be removed? +Source0: https://%{import_path}/archive/%{commit}/%{name}-%{version}.tar.gz +BuildRequires: systemd +BuildRequires: bsdtar +BuildRequires: golang >= %{golang_version} +BuildRequires: krb5-devel +BuildRequires: rsync + +%description +OpenShift is a distribution of Kubernetes optimized for enterprise application +development and deployment. OpenShift adds developer and operational centric +tools on top of Kubernetes to enable rapid application development, easy +deployment and scaling, and long-term lifecycle maintenance for small and large +teams and applications. It provides a secure and multi-tenant configuration for +Kubernetes allowing you to safely host many different applications and workloads +on a unified cluster. 
+ +%package hyperkube +Summary: OpenShift Kubernetes server commands, via deps +Requires: kube-scheduler = %{version} +Requires: kube-kubelet = %{version} +Requires: kube-controller-manager = %{version} +Requires: kube-apiserver = %{version} +Provides: hyperkube = %{version} +Obsoletes: atomic-openshift-hyperkube <= %{version} +Obsoletes: atomic-openshift-node <= %{version} + +%package kube-scheduler +Summary: OpenShift Kubernetes Scheduler +Provides: kube-scheduler = %{version} + +%package kubelet +Summary: OpenShift Kubernetes Kubelet +Requires: util-linux +Requires: socat +Requires: iptables +Provides: kube-kubelet = %{version} + +%package kube-controller-manager +Summary: OpenShift Kubernetes Controller Manager +Provides: kube-controller-manager = %{version} + +%package kube-apiserver +Summary: OpenShift Kubernetes API Server +Provides: kube-apiserver = %{version} + +%description hyperkube +%{summary} + +%description kube-scheduler +%{summary} + +%description kubelet +%{summary} + +%description kube-controller-manager +%{summary} + +%description kube-apiserver +%{summary} + +%prep +%if 0%{do_prep} +%setup -q +%endif + +%build +%if 0%{do_build} +# Create Binaries only for building arch +%ifarch x86_64 + BUILD_PLATFORM="linux/amd64" +%endif +%ifarch ppc64le + BUILD_PLATFORM="linux/ppc64le" +%endif +%ifarch %{arm} aarch64 + BUILD_PLATFORM="linux/arm64" +%endif +%ifarch s390x + BUILD_PLATFORM="linux/s390x" +%endif +KUBE_BUILD_PLATFORMS="${BUILD_PLATFORM}" %{os_git_vars} make all WHAT='cmd/kube-apiserver cmd/kube-controller-manager cmd/kube-scheduler cmd/kubelet' +%endif + +%install + +PLATFORM="$(go env GOHOSTOS)/$(go env GOHOSTARCH)" +install -d %{buildroot}%{_bindir} +install -d %{buildroot}%{_sysctldir} + +# Install linux components +for bin in kube-apiserver kube-controller-manager kube-scheduler kubelet +do + echo "+++ INSTALLING ${bin}" + install -p -m 755 _output/local/bin/${PLATFORM}/${bin} %{buildroot}%{_bindir}/${bin} +done + +install -p -m 755 openshift-hack/images/hyperkube/hyperkube %{buildroot}%{_bindir}/hyperkube +install -p -m 755 openshift-hack/images/hyperkube/kubensenter %{buildroot}%{_bindir}/kubensenter +install -p -m 755 openshift-hack/sysctls/50-kubelet.conf %{buildroot}%{_sysctldir}/50-kubelet.conf + +%post kubelet +%sysctl_apply 50-kubelet.conf + +%files hyperkube +%license LICENSE +%{_bindir}/hyperkube +%defattr(-,root,root,0700) + +%files kubelet +%{_bindir}/kubelet +%{_bindir}/kubensenter +%{_sysctldir}/50-kubelet.conf +%defattr(-,root,root,0700) + +%files kube-scheduler +%{_bindir}/kube-scheduler + +%files kube-controller-manager +%{_bindir}/kube-controller-manager + +%files kube-apiserver +%{_bindir}/kube-apiserver + + + +%changelog diff --git a/pkg/kubelet/DOWNSTREAM_OWNERS b/pkg/kubelet/DOWNSTREAM_OWNERS new file mode 100644 index 0000000000000..d484fa4fc246a --- /dev/null +++ b/pkg/kubelet/DOWNSTREAM_OWNERS @@ -0,0 +1,17 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Downstream reviewers, don't have to match those in OWNERS +reviewers: + - rphillips + - sjenning + - mrunalp + +# Sub-package approvers from upstream with permission to approve downstream backports following these rules: +# - they MUST be approvers upstream (here compare https://github.com/kubernetes/kubernetes/blob/17bb2fc050ec786b60db7d8d6d4d3ac8eeac205b/pkg/kubelet/OWNERS#L10-L11) +# - they may approve "UPSTREAM: : ..." changes that merged upstream. +# - carry patches for "UPSTREAM: : ..." 
and any unmerged PRs of the previous kind will have to be approved by the top-level approvers. +approvers: + - sjenning + - mrunalp + +component: node
diff --git a/staging/src/k8s.io/code-generator/examples/hack/verify-codegen.sh b/staging/src/k8s.io/code-generator/examples/hack/verify-codegen.sh index 7dd5e657671fb..72ead8ed65062 100755 --- a/staging/src/k8s.io/code-generator/examples/hack/verify-codegen.sh +++ b/staging/src/k8s.io/code-generator/examples/hack/verify-codegen.sh @@ -43,6 +43,8 @@ else exit 1 fi +export GOFLAGS=-mod=readonly + # smoke test echo "Smoke testing examples by compiling..." pushd "${SCRIPT_ROOT}"
diff --git a/staging/src/k8s.io/code-generator/kube_codegen.sh b/staging/src/k8s.io/code-generator/kube_codegen.sh index 8207da5eddd14..1ae0294783028 100755 --- a/staging/src/k8s.io/code-generator/kube_codegen.sh +++ b/staging/src/k8s.io/code-generator/kube_codegen.sh @@ -27,6 +27,8 @@ set -o pipefail KUBE_CODEGEN_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P)" +export GOFLAGS=-mod=readonly + function kube::codegen::internal::findz() { # We use `find` rather than `git ls-files` because sometimes external # projects use this across repos. This is an imperfect wrapper of find,
diff --git a/test/typecheck/main.go b/test/typecheck/main.go index ad2416ae112b7..618db09b89f74 100644 --- a/test/typecheck/main.go +++ b/test/typecheck/main.go @@ -63,7 +63,9 @@ func newConfig(platform string) *packages.Config { mode = mode | packages.NeedTypesInfo } env := append(os.Environ(), - "CGO_ENABLED=1", + // OpenShift doesn't build with CGO, since we use host-provided SSL + // binaries for FIPS compatibility. + // "CGO_ENABLED=1", fmt.Sprintf("GOOS=%s", goos), fmt.Sprintf("GOARCH=%s", goarch)) tagstr := "selinux"