diff --git a/CATALOG.md b/CATALOG.md
index 2ee9c8549..ffef54e30 100644
--- a/CATALOG.md
+++ b/CATALOG.md
@@ -11,42 +11,42 @@ Depending on the workload type, not all tests are required to pass to satisfy be

### Total suites: 10

-|Suite|Tests per suite|Link|
-|---|---|---|
-|access-control|28|[access-control](#access-control)|
-|affiliated-certification|4|[affiliated-certification](#affiliated-certification)|
-|lifecycle|18|[lifecycle](#lifecycle)|
-|manageability|2|[manageability](#manageability)|
-|networking|12|[networking](#networking)|
-|observability|5|[observability](#observability)|
-|operator|12|[operator](#operator)|
-|performance|6|[performance](#performance)|
-|platform-alteration|14|[platform-alteration](#platform-alteration)|
-|preflight|19|[preflight](#preflight)|
+| Suite                    | Tests per suite | Link                                                   |
+| ------------------------ | --------------- | ------------------------------------------------------ |
+| access-control           | 28              | [access-control](#access-control)                      |
+| affiliated-certification | 4               | [affiliated-certification](#affiliated-certification)  |
+| lifecycle                | 18              | [lifecycle](#lifecycle)                                 |
+| manageability            | 2               | [manageability](#manageability)                         |
+| networking               | 12              | [networking](#networking)                               |
+| observability            | 5               | [observability](#observability)                         |
+| operator                 | 12              | [operator](#operator)                                   |
+| performance              | 6               | [performance](#performance)                             |
+| platform-alteration      | 14              | [platform-alteration](#platform-alteration)             |
+| preflight                | 19              | [preflight](#preflight)                                 |

### Extended specific tests only: 13

-|Mandatory|Optional|
-|---|---|---|
-|10|3|
+| Mandatory | Optional |
+| --------- | -------- |
+| 10        | 3        |

### Far-Edge specific tests only: 9

-|Mandatory|Optional|
-|---|---|---|
-|8|1|
+| Mandatory | Optional |
+| --------- | -------- |
+| 8         | 1        |

### Non-Telco specific tests only: 71

-|Mandatory|Optional|
-|---|---|---|
-|43|28|
+| Mandatory | Optional |
+| --------- | -------- |
+| 43        | 28       |

### Telco specific tests only: 27

-|Mandatory|Optional|
-|---|---|---|
-|26|1|
+| Mandatory | Optional |
+| --------- | -------- |
+| 26        | 1        |

## Test Case list

@@ -56,2061 +56,2061 @@ Test Cases are the specifications used to perform a meaningful test. Test cases

#### access-control-bpf-capability-check

-|Property|Description|
-|---|---|
-|Unique ID|access-control-bpf-capability-check|
-|Description|Ensures that containers do not use BPF capability. Workloads should avoid loading eBPF filters|
-|Suggested Remediation|Remove the following capability from the container/pod definitions: BPF|
-|Best Practice Reference|No Doc Link - Telco|
-|Exception Process|Exception can be considered. Must identify which container requires the capability and detail why.|
-|Impact Statement|BPF capability allows kernel-level programming that can bypass security controls, monitor other processes, and potentially compromise the entire host system.|
-|Tags|telco,access-control|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | access-control-bpf-capability-check |
+| Description             | Ensures that containers do not use BPF capability. Workloads should avoid loading eBPF filters |
+| Suggested Remediation   | Remove the following capability from the container/pod definitions: BPF |
+| Best Practice Reference | No Doc Link - Telco |
+| Exception Process       | Exception can be considered. Must identify which container requires the capability and detail why. |
+| Impact Statement        | BPF capability allows kernel-level programming that can bypass security controls, monitor other processes, and potentially compromise the entire host system. |
+| Tags                    | telco,access-control |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Mandatory |

#### access-control-cluster-role-bindings

-|Property|Description|
-|---|---|
-|Unique ID|access-control-cluster-role-bindings|
-|Description|Tests that a Pod does not specify ClusterRoleBindings.|
-|Suggested Remediation|In most cases, Pod's should not have ClusterRoleBindings. The suggested remediation is to remove the need for ClusterRoleBindings, if possible. Cluster roles and cluster role bindings discouraged unless absolutely needed by the workload (often reserved for cluster admin only).|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-security-and-role-based-access-control|
-|Exception Process|Exception possible only for workloads that's cluster wide in nature and absolutely needs cluster level roles & role bindings|
-|Impact Statement|Cluster-wide role bindings grant excessive privileges that can be exploited for lateral movement and privilege escalation across the entire cluster.|
-|Tags|telco,access-control|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | access-control-cluster-role-bindings |
+| Description             | Tests that a Pod does not specify ClusterRoleBindings. |
+| Suggested Remediation   | In most cases, Pods should not have ClusterRoleBindings. The suggested remediation is to remove the need for ClusterRoleBindings, if possible. Cluster roles and cluster role bindings are discouraged unless absolutely needed by the workload (often reserved for cluster admin only). |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-security-and-role-based-access-control |
+| Exception Process       | Exception possible only for workloads that are cluster-wide in nature and absolutely need cluster-level roles & role bindings |
+| Impact Statement        | Cluster-wide role bindings grant excessive privileges that can be exploited for lateral movement and privilege escalation across the entire cluster. |
+| Tags                    | telco,access-control |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Mandatory |
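+
+A minimal sketch of the namespaced `Role`/`RoleBinding` pair this test favors over a `ClusterRoleBinding`; all names (`my-workload-ns`, `my-workload-sa`, etc.) are illustrative placeholders:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: my-workload-role        # hypothetical name
+  namespace: my-workload-ns     # scoped to the workload namespace, not cluster-wide
+rules:
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: my-workload-rolebinding
+  namespace: my-workload-ns
+subjects:
+  - kind: ServiceAccount
+    name: my-workload-sa
+    namespace: my-workload-ns
+roleRef:
+  kind: Role                    # namespaced Role, not ClusterRole
+  name: my-workload-role
+  apiGroup: rbac.authorization.k8s.io
+```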

#### access-control-container-host-port

-|Property|Description|
-|---|---|
-|Unique ID|access-control-container-host-port|
-|Description|Verifies if containers define a hostPort.|
-|Suggested Remediation|Remove hostPort configuration from the container. Workloads should avoid accessing host resources - containers should not configure HostPort.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-accessing-resource-on-host|
-|Exception Process|Exception for host resource access tests will only be considered in rare cases where it is absolutely needed|
-|Impact Statement|Host port usage can create port conflicts with host services and expose containers directly to the host network, bypassing network security controls.|
-|Tags|common,access-control|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | access-control-container-host-port |
+| Description             | Verifies if containers define a hostPort. |
+| Suggested Remediation   | Remove hostPort configuration from the container. Workloads should avoid accessing host resources - containers should not configure HostPort. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-accessing-resource-on-host |
+| Exception Process       | Exception for host resource access tests will only be considered in rare cases where it is absolutely needed |
+| Impact Statement        | Host port usage can create port conflicts with host services and expose containers directly to the host network, bypassing network security controls. |
+| Tags                    | common,access-control |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |
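+
+A minimal sketch of exposing a container without `hostPort`, using a `containerPort` plus a ClusterIP `Service` instead; image and names are placeholders:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: my-app                  # hypothetical name
+  labels:
+    app: my-app
+spec:
+  containers:
+    - name: web
+      image: registry.example.com/my-app:1.0   # placeholder image
+      ports:
+        - containerPort: 8080   # no hostPort configured
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-app
+spec:
+  type: ClusterIP               # traffic routed by the cluster, not the host
+  selector:
+    app: my-app
+  ports:
+    - port: 80
+      targetPort: 8080
+```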

#### access-control-crd-roles

-|Property|Description|
-|---|---|
-|Unique ID|access-control-crd-roles|
-|Description|If an application creates CRDs it must supply a role to access those CRDs and no other API resources/permission. This test checks that there is at least one role present in each namespaces under test that only refers to CRDs under test.|
-|Suggested Remediation|Roles providing access to CRDs should not refer to any other api or resources. Change the generation of the CRD role accordingly|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-custom-role-to-access-application-crds|
-|Exception Process|No exception needed for optional/extended tests.|
-|Impact Statement|Improper CRD role configurations can grant excessive privileges, violate least-privilege principles, and create security vulnerabilities in custom resource access control.|
-|Tags|extended,access-control|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | access-control-crd-roles |
+| Description             | If an application creates CRDs it must supply a role to access those CRDs and no other API resources/permissions. This test checks that there is at least one role present in each namespace under test that only refers to CRDs under test. |
+| Suggested Remediation   | Roles providing access to CRDs should not refer to any other API or resources. Change the generation of the CRD role accordingly |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-custom-role-to-access-application-crds |
+| Exception Process       | No exception needed for optional/extended tests. |
+| Impact Statement        | Improper CRD role configurations can grant excessive privileges, violate least-privilege principles, and create security vulnerabilities in custom resource access control. |
+| Tags                    | extended,access-control |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### access-control-ipc-lock-capability-check

-|Property|Description|
-|---|---|
-|Unique ID|access-control-ipc-lock-capability-check|
-|Description|Ensures that containers do not use IPC_LOCK capability. Workloads should avoid accessing host resources - spec.HostIpc should be false.|
-|Suggested Remediation|Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipc_lock|
-|Exception Process|Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why.|
-|Impact Statement|IPC_LOCK capability can be exploited to lock system memory, potentially causing denial of service and affecting other workloads on the same node.|
-|Tags|telco,access-control|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | access-control-ipc-lock-capability-check |
+| Description             | Ensures that containers do not use IPC_LOCK capability. Workloads should avoid accessing host resources - spec.HostIpc should be false. |
+| Suggested Remediation   | Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipc_lock |
+| Exception Process       | Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why. |
+| Impact Statement        | IPC_LOCK capability can be exploited to lock system memory, potentially causing denial of service and affecting other workloads on the same node. |
+| Tags                    | telco,access-control |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Mandatory |

#### access-control-namespace

-|Property|Description|
-|---|---|
-|Unique ID|access-control-namespace|
-|Description|Tests that all workload resources (PUTs and CRs) belong to valid namespaces.
A valid namespace meets the following conditions: (1) It was declared in the yaml config file under the targetNameSpaces tag. (2) It does not have any of the following prefixes: default, openshift-, istio- and aspenmesh-| -|Suggested Remediation|Ensure that your workload utilizes namespaces declared in the yaml config file. Additionally, the namespaces should not start with "default, openshift-, istio- or aspenmesh-".| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs| -|Exception Process|No exceptions| -|Impact Statement|Using inappropriate namespaces can lead to resource conflicts, security boundary violations, and administrative complexity in multi-tenant environments.| -|Tags|common,access-control| -|**Scenario**|**Optional/Mandatory**| -|Extended|Optional| -|Far-Edge|Optional| -|Non-Telco|Optional| -|Telco|Optional| +| Property | Description | +| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | access-control-namespace | +| Description | Tests that all workload resources (PUTs and CRs) belong to valid namespaces. A valid namespace meets the following conditions: (1) It was declared in the yaml config file under the targetNameSpaces tag. (2) It does not have any of the following prefixes: default, openshift-, istio- and aspenmesh- | +| Suggested Remediation | Ensure that your workload utilizes namespaces declared in the yaml config file. Additionally, the namespaces should not start with "default, openshift-, istio- or aspenmesh-". | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs | +| Exception Process | No exceptions | +| Impact Statement | Using inappropriate namespaces can lead to resource conflicts, security boundary violations, and administrative complexity in multi-tenant environments. | +| Tags | common,access-control | +| **Scenario** | **Optional/Mandatory** | +| Extended | Optional | +| Far-Edge | Optional | +| Non-Telco | Optional | +| Telco | Optional | #### access-control-namespace-resource-quota -|Property|Description| -|---|---| -|Unique ID|access-control-namespace-resource-quota| -|Description|Checks to see if workload pods are running in namespaces that have resource quotas applied.| -|Suggested Remediation|Apply a ResourceQuota to the namespace your workload is running in. 
The workload's namespace should have resource quota defined.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-memory-allocation|
-|Exception Process|No exception needed for optional/extended tests.|
-|Impact Statement|Without resource quotas, workloads can consume excessive cluster resources, causing performance issues and potential denial of service for other applications.|
-|Tags|extended,access-control|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | access-control-namespace-resource-quota |
+| Description             | Checks to see if workload pods are running in namespaces that have resource quotas applied. |
+| Suggested Remediation   | Apply a ResourceQuota to the namespace your workload is running in. The workload's namespace should have resource quota defined. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-memory-allocation |
+| Exception Process       | No exception needed for optional/extended tests. |
+| Impact Statement        | Without resource quotas, workloads can consume excessive cluster resources, causing performance issues and potential denial of service for other applications. |
+| Tags                    | extended,access-control |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |
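+
+A minimal sketch of a `ResourceQuota` of the kind this test looks for; the name, namespace, and limits are illustrative placeholders to be sized for the actual workload:
+
+```yaml
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: workload-quota          # hypothetical name
+  namespace: my-workload-ns     # hypothetical workload namespace
+spec:
+  hard:
+    requests.cpu: "4"           # total CPU all pods in the namespace may request
+    requests.memory: 8Gi
+    limits.cpu: "8"
+    limits.memory: 16Gi
+```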
Must identify which container requires the capability and detail why.|
-|Impact Statement|NET_ADMIN capability allows network configuration changes that can compromise cluster networking, enable privilege escalation, and bypass network security controls.|
-|Tags|telco,access-control|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | access-control-net-admin-capability-check |
+| Description             | Ensures that containers do not use NET_ADMIN capability. Note: this test also ensures iptables and nftables are not configured by workload pods: - NET_ADMIN and NET_RAW are required to modify nftables (namespaced) which is not desired inside pods. nftables should be configured by an administrator outside the scope of the workload. nftables are usually configured by operators, for instance the Performance Addon Operator (PAO) or istio. - Privileged containers are required to modify host iptables, which is not safe to perform inside pods. nftables should be configured by an administrator outside the scope of the workload. iptables are usually configured by operators, for instance the Performance Addon Operator (PAO) or istio. |
+| Suggested Remediation   | Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-net_admin |
+| Exception Process       | Exception will be considered for user plane or networking functions (e.g. SR-IOV, Multicast). Must identify which container requires the capability and detail why. |
+| Impact Statement        | NET_ADMIN capability allows network configuration changes that can compromise cluster networking, enable privilege escalation, and bypass network security controls. |
+| Tags                    | telco,access-control |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Mandatory |

#### access-control-net-raw-capability-check

-|Property|Description|
-|---|---|
-|Unique ID|access-control-net-raw-capability-check|
-|Description|Ensures that containers do not use NET_RAW capability. Note: this test also ensures iptables and nftables are not configured by workload pods: - NET_ADMIN and NET_RAW are required to modify nftables (namespaced) which is not desired inside pods. nftables should be configured by an administrator outside the scope of the workload. nftables are usually configured by operators, for instance the Performance Addon Operator (PAO) or istio. - Privileged container are required to modify host iptables, which is not safe to perform inside pods. nftables should be configured by an administrator outside the scope of the workload. iptables are usually configured by operators, for instance the Performance Addon Operator (PAO) or istio.|
-|Suggested Remediation|Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-user-plane-cnfs|
-|Exception Process|Exception will be considered for user plane or networking functions. Must identify which container requires the capability and detail why.|
-|Impact Statement|NET_RAW capability enables packet manipulation and network sniffing, which can be used for attacks against other workloads and compromise network security.|
-|Tags|telco,access-control|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | access-control-net-raw-capability-check |
+| Description             | Ensures that containers do not use NET_RAW capability. Note: this test also ensures iptables and nftables are not configured by workload pods: - NET_ADMIN and NET_RAW are required to modify nftables (namespaced) which is not desired inside pods. nftables should be configured by an administrator outside the scope of the workload. nftables are usually configured by operators, for instance the Performance Addon Operator (PAO) or istio. - Privileged containers are required to modify host iptables, which is not safe to perform inside pods. nftables should be configured by an administrator outside the scope of the workload. iptables are usually configured by operators, for instance the Performance Addon Operator (PAO) or istio. |
+| Suggested Remediation   | Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-user-plane-cnfs |
+| Exception Process       | Exception will be considered for user plane or networking functions. Must identify which container requires the capability and detail why. |
+| Impact Statement        | NET_RAW capability enables packet manipulation and network sniffing, which can be used for attacks against other workloads and compromise network security.
| +| Tags | telco,access-control | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Optional | +| Telco | Mandatory | #### access-control-no-1337-uid -|Property|Description| -|---|---| -|Unique ID|access-control-no-1337-uid| -|Description|Checks that all pods are not using the securityContext UID 1337| -|Suggested Remediation|Use another process UID that is not 1337.| -|Best Practice Reference|No Doc Link - Extended| -|Exception Process|No exception needed for optional/extended tests.| -|Impact Statement|UID 1337 is reserved for use by Istio service mesh components; using it for applications can cause conflicts with Istio sidecars and break service mesh functionality.| -|Tags|extended,access-control| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Optional| -|Non-Telco|Optional| -|Telco|Optional| +| Property | Description | +| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | access-control-no-1337-uid | +| Description | Checks that all pods are not using the securityContext UID 1337 | +| Suggested Remediation | Use another process UID that is not 1337. | +| Best Practice Reference | No Doc Link - Extended | +| Exception Process | No exception needed for optional/extended tests. | +| Impact Statement | UID 1337 is reserved for use by Istio service mesh components; using it for applications can cause conflicts with Istio sidecars and break service mesh functionality. | +| Tags | extended,access-control | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Optional | +| Non-Telco | Optional | +| Telco | Optional | #### access-control-one-process-per-container -|Property|Description| -|---|---| -|Unique ID|access-control-one-process-per-container| -|Description|Check that all containers under test have only one process running| -|Suggested Remediation|Launch only one process per container. Should adhere to 1 process per container best practice wherever possible.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-one-process-per-container| -|Exception Process|No exception needed for optional/extended tests. Not applicable to SNO applications.| -|Impact Statement|Multiple processes per container complicate monitoring, debugging, and security assessment, and can lead to zombie processes and resource leaks.| -|Tags|common,access-control| -|**Scenario**|**Optional/Mandatory**| -|Extended|Optional| -|Far-Edge|Optional| -|Non-Telco|Optional| -|Telco|Optional| +| Property | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | +| Unique ID | access-control-one-process-per-container | +| Description | Check that all containers under test have only one process running | +| Suggested Remediation | Launch only one process per container. Should adhere to 1 process per container best practice wherever possible. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-one-process-per-container | +| Exception Process | No exception needed for optional/extended tests. Not applicable to SNO applications. 
| +| Impact Statement | Multiple processes per container complicate monitoring, debugging, and security assessment, and can lead to zombie processes and resource leaks. | +| Tags | common,access-control | +| **Scenario** | **Optional/Mandatory** | +| Extended | Optional | +| Far-Edge | Optional | +| Non-Telco | Optional | +| Telco | Optional | #### access-control-pod-automount-service-account-token -|Property|Description| -|---|---| -|Unique ID|access-control-pod-automount-service-account-token| -|Description|Check that all pods under test have automountServiceAccountToken set to false. Only pods that require access to the kubernetes API server should have automountServiceAccountToken set to true| -|Suggested Remediation|Check that pod has automountServiceAccountToken set to false or pod is attached to service account which has automountServiceAccountToken set to false, unless the pod needs access to the kubernetes API server. Pods which do not need API access should set automountServiceAccountToken to false in pod spec.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-automount-services-for-pods| -|Exception Process|Exception will be considered if container needs to access APIs which OCP does not offer natively. Must document which container requires which API(s) and detail why existing OCP APIs cannot be used.| -|Impact Statement|Auto-mounted service account tokens expose Kubernetes API credentials to application code, creating potential attack vectors if applications are compromised.| -|Tags|telco,access-control| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Optional| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | access-control-pod-automount-service-account-token | +| Description | Check that all pods under test have automountServiceAccountToken set to false. Only pods that require access to the kubernetes API server should have automountServiceAccountToken set to true | +| Suggested Remediation | Check that pod has automountServiceAccountToken set to false or pod is attached to service account which has automountServiceAccountToken set to false, unless the pod needs access to the kubernetes API server. Pods which do not need API access should set automountServiceAccountToken to false in pod spec. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-automount-services-for-pods | +| Exception Process | Exception will be considered if container needs to access APIs which OCP does not offer natively. Must document which container requires which API(s) and detail why existing OCP APIs cannot be used. | +| Impact Statement | Auto-mounted service account tokens expose Kubernetes API credentials to application code, creating potential attack vectors if applications are compromised. 
| +| Tags | telco,access-control | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Optional | +| Telco | Mandatory | #### access-control-pod-host-ipc -|Property|Description| -|---|---| -|Unique ID|access-control-pod-host-ipc| -|Description|Verifies that the spec.HostIpc parameter is set to false| -|Suggested Remediation|Set the spec.HostIpc parameter to false in the pod configuration. Workloads should avoid accessing host resources - spec.HostIpc should be false.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security| -|Exception Process|Exception for host resource access tests will only be considered in rare cases where it is absolutely needed| -|Impact Statement|Host IPC access allows containers to communicate with host processes, potentially exposing sensitive information and enabling privilege escalation.| -|Tags|common,access-control| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | access-control-pod-host-ipc | +| Description | Verifies that the spec.HostIpc parameter is set to false | +| Suggested Remediation | Set the spec.HostIpc parameter to false in the pod configuration. Workloads should avoid accessing host resources - spec.HostIpc should be false. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security | +| Exception Process | Exception for host resource access tests will only be considered in rare cases where it is absolutely needed | +| Impact Statement | Host IPC access allows containers to communicate with host processes, potentially exposing sensitive information and enabling privilege escalation. | +| Tags | common,access-control | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### access-control-pod-host-network -|Property|Description| -|---|---| -|Unique ID|access-control-pod-host-network| -|Description|Verifies that the spec.HostNetwork parameter is not set (not present)| -|Suggested Remediation|Set the spec.HostNetwork parameter to false in the pod configuration. 
Workloads should avoid accessing host resources - spec.HostNetwork should be false.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security| -|Exception Process|Exception for host resource access tests will only be considered in rare cases where it is absolutely needed| -|Impact Statement|Host network access removes network isolation, exposes containers to host network interfaces, and can compromise cluster networking security.| -|Tags|common,access-control| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | access-control-pod-host-network | +| Description | Verifies that the spec.HostNetwork parameter is not set (not present) | +| Suggested Remediation | Set the spec.HostNetwork parameter to false in the pod configuration. Workloads should avoid accessing host resources - spec.HostNetwork should be false. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security | +| Exception Process | Exception for host resource access tests will only be considered in rare cases where it is absolutely needed | +| Impact Statement | Host network access removes network isolation, exposes containers to host network interfaces, and can compromise cluster networking security. | +| Tags | common,access-control | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### access-control-pod-host-path -|Property|Description| -|---|---| -|Unique ID|access-control-pod-host-path| -|Description|Verifies that the spec.HostPath parameter is not set (not present)| -|Suggested Remediation|Set the spec.HostPath parameter to false in the pod configuration. Workloads should avoid accessing host resources - spec.HostPath should be false.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security| -|Exception Process|Exception for host resource access tests will only be considered in rare cases where it is absolutely needed| -|Impact Statement|Host path mounts can expose sensitive host files to containers, enable container escape attacks, and compromise host system integrity.| -|Tags|common,access-control| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | access-control-pod-host-path | +| Description | Verifies that the spec.HostPath parameter is not set (not present) | +| Suggested Remediation | Set the spec.HostPath parameter to false in the pod configuration. Workloads should avoid accessing host resources - spec.HostPath should be false. 
| +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security | +| Exception Process | Exception for host resource access tests will only be considered in rare cases where it is absolutely needed | +| Impact Statement | Host path mounts can expose sensitive host files to containers, enable container escape attacks, and compromise host system integrity. | +| Tags | common,access-control | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### access-control-pod-host-pid -|Property|Description| -|---|---| -|Unique ID|access-control-pod-host-pid| -|Description|Verifies that the spec.HostPid parameter is set to false| -|Suggested Remediation|Set the spec.HostPid parameter to false in the pod configuration. Workloads should avoid accessing host resources - spec.HostPid should be false.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security| -|Exception Process|Exception for host resource access tests will only be considered in rare cases where it is absolutely needed| -|Impact Statement|Host PID access allows containers to see and interact with all host processes, creating opportunities for privilege escalation and information disclosure.| -|Tags|common,access-control| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | access-control-pod-host-pid | +| Description | Verifies that the spec.HostPid parameter is set to false | +| Suggested Remediation | Set the spec.HostPid parameter to false in the pod configuration. Workloads should avoid accessing host resources - spec.HostPid should be false. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security | +| Exception Process | Exception for host resource access tests will only be considered in rare cases where it is absolutely needed | +| Impact Statement | Host PID access allows containers to see and interact with all host processes, creating opportunities for privilege escalation and information disclosure. | +| Tags | common,access-control | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### access-control-pod-role-bindings -|Property|Description| -|---|---| -|Unique ID|access-control-pod-role-bindings| -|Description|Ensures that a workload does not utilize RoleBinding(s) in a non-workload Namespace.| -|Suggested Remediation|Ensure the workload is not configured to use RoleBinding(s) in a non-workload Namespace. 
Scope of role must <= scope of creator of role.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-security-and-role-based-access-control|
-|Exception Process|No exceptions|
-|Impact Statement|Cross-namespace role bindings can violate tenant isolation and create unintended privilege escalation paths.|
-|Tags|common,access-control|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | access-control-pod-role-bindings |
+| Description             | Ensures that a workload does not utilize RoleBinding(s) in a non-workload Namespace. |
+| Suggested Remediation   | Ensure the workload is not configured to use RoleBinding(s) in a non-workload Namespace. Scope of role must <= scope of creator of role. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-security-and-role-based-access-control |
+| Exception Process       | No exceptions |
+| Impact Statement        | Cross-namespace role bindings can violate tenant isolation and create unintended privilege escalation paths. |
+| Tags                    | common,access-control |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |

#### access-control-pod-service-account

-|Property|Description|
-|---|---|
-|Unique ID|access-control-pod-service-account|
-|Description|Tests that each workload Pod utilizes a valid Service Account. Default or empty service account is not valid.|
-|Suggested Remediation|Ensure that the each workload Pod is configured to use a valid Service Account|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-scc-permissions-for-an-application|
-|Exception Process|No exceptions|
-|Impact Statement|Default service accounts often have excessive privileges; improper usage can lead to unauthorized API access and security violations.|
-|Tags|common,access-control|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | access-control-pod-service-account |
+| Description             | Tests that each workload Pod utilizes a valid Service Account. Default or empty service account is not valid. |
+| Suggested Remediation   | Ensure that each workload Pod is configured to use a valid Service Account |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-scc-permissions-for-an-application |
+| Exception Process       | No exceptions |
+| Impact Statement        | Default service accounts often have excessive privileges; improper usage can lead to unauthorized API access and security violations. |
+| Tags                    | common,access-control |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |
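+
+A minimal sketch of a dedicated, non-default service account, with `automountServiceAccountToken: false` for pods that do not need API access (this also satisfies access-control-pod-automount-service-account-token above); names are placeholders:
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: my-workload-sa          # hypothetical, non-default service account
+  namespace: my-workload-ns
+automountServiceAccountToken: false
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: my-app
+  namespace: my-workload-ns
+spec:
+  serviceAccountName: my-workload-sa    # explicit service account, not "default"
+  automountServiceAccountToken: false   # pod does not need the Kubernetes API
+  containers:
+    - name: app
+      image: registry.example.com/my-app:1.0   # placeholder image
+```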

#### access-control-requests

-|Property|Description|
-|---|---|
-|Unique ID|access-control-requests|
-|Description|Check that containers have resource requests specified in their spec. Set proper resource requests based on container use case.|
-|Suggested Remediation|Add requests to your container spec. See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requests-limits|
-|Exception Process|Exceptions possible for platform and infrastructure containers. Must identify which container needs access and document why with details.|
-|Impact Statement|Missing resource requests can lead to resource contention, node instability, and unpredictable application performance.|
-|Tags|telco,access-control|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | access-control-requests |
+| Description             | Check that containers have resource requests specified in their spec. Set proper resource requests based on container use case. |
+| Suggested Remediation   | Add requests to your container spec. See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requests-limits |
+| Exception Process       | Exceptions possible for platform and infrastructure containers. Must identify which container needs access and document why with details. |
+| Impact Statement        | Missing resource requests can lead to resource contention, node instability, and unpredictable application performance. |
+| Tags                    | telco,access-control |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Mandatory |

#### access-control-security-context

-|Property|Description|
-|---|---|
-|Unique ID|access-control-security-context|
-|Description|Checks the security context matches one of the 4 categories|
-|Suggested Remediation|Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and document why. If the container had the right configuration of the allowed category from the 4 approved list then the test will pass.
The 4 categories are defined in Requirement ID 94118 [here](#security-context-categories)| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-linux-capabilities| -|Exception Process|no exception needed for optional/extended test| -|Impact Statement|Incorrect security context configurations can weaken container isolation, enable privilege escalation, and create exploitable attack vectors.| -|Tags|extended,access-control| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Optional| -|Non-Telco|Optional| -|Telco|Optional| +| Property | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | access-control-security-context | +| Description | Checks the security context matches one of the 4 categories | +| Suggested Remediation | Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and document why. If the container had the right configuration of the allowed category from the 4 approved list then the test will pass. The 4 categories are defined in Requirement ID 94118 [here](#security-context-categories) | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-linux-capabilities | +| Exception Process | no exception needed for optional/extended test | +| Impact Statement | Incorrect security context configurations can weaken container isolation, enable privilege escalation, and create exploitable attack vectors. | +| Tags | extended,access-control | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Optional | +| Non-Telco | Optional | +| Telco | Optional | #### access-control-security-context-non-root-user-id-check -|Property|Description| -|---|---| -|Unique ID|access-control-security-context-non-root-user-id-check| -|Description|Checks securityContext's runAsNonRoot and runAsUser fields at pod and container level to make sure containers are not run as root.| -|Suggested Remediation|Set the securityContext.runAsNonRoot field to true either at pod or container level. Alternatively, set a non-zero value to securityContext.runAsUser field either at pod or container level.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security| -|Exception Process|No exceptions - will only be considered under special circumstances. 
Must identify which container needs access and document why with details.| -|Impact Statement|Running containers as root increases the blast radius of security vulnerabilities and can lead to full host compromise if containers are breached.| -|Tags|common,access-control| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | access-control-security-context-non-root-user-id-check | +| Description | Checks securityContext's runAsNonRoot and runAsUser fields at pod and container level to make sure containers are not run as root. | +| Suggested Remediation | Set the securityContext.runAsNonRoot field to true either at pod or container level. Alternatively, set a non-zero value to securityContext.runAsUser field either at pod or container level. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security | +| Exception Process | No exceptions - will only be considered under special circumstances. Must identify which container needs access and document why with details. | +| Impact Statement | Running containers as root increases the blast radius of security vulnerabilities and can lead to full host compromise if containers are breached. | +| Tags | common,access-control | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### access-control-security-context-privilege-escalation -|Property|Description| -|---|---| -|Unique ID|access-control-security-context-privilege-escalation| -|Description|Checks if privileged escalation is enabled (AllowPrivilegeEscalation=true).| -|Suggested Remediation|Configure privilege escalation to false. Privileged escalation should not be allowed (AllowPrivilegeEscalation=false).| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security| -|Exception Process|No exceptions| -|Impact Statement|Allowing privilege escalation can lead to containers gaining root access, compromising the security boundary between containers and hosts.| -|Tags|common,access-control| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| Unique ID | access-control-security-context-privilege-escalation | +| Description | Checks if privileged escalation is enabled (AllowPrivilegeEscalation=true). | +| Suggested Remediation | Configure privilege escalation to false. Privileged escalation should not be allowed (AllowPrivilegeEscalation=false). | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security | +| Exception Process | No exceptions | +| Impact Statement | Allowing privilege escalation can lead to containers gaining root access, compromising the security boundary between containers and hosts. 
|
+| Tags                    | common,access-control |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |

#### access-control-security-context-read-only-file-system

-|Property|Description|
-|---|---|
-|Unique ID|access-control-security-context-read-only-file-system|
-|Description|Checks the security context readOnlyFileSystem in containers is enabled. Containers should not try modify its own filesystem.|
-|Suggested Remediation|No exceptions - will only be considered under special circumstances. Must identify which container needs access and document why with details.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-linux-capabilities|
-|Exception Process|No exceptions|
-|Impact Statement|Writable root filesystems increase the attack surface and can be exploited to modify container behavior or persist malware.|
-|Tags|common,access-control|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | access-control-security-context-read-only-file-system |
+| Description             | Checks the security context readOnlyFileSystem in containers is enabled. Containers should not try to modify their own filesystem. |
+| Suggested Remediation   | No exceptions - will only be considered under special circumstances. Must identify which container needs access and document why with details. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-linux-capabilities |
+| Exception Process       | No exceptions |
+| Impact Statement        | Writable root filesystems increase the attack surface and can be exploited to modify container behavior or persist malware. |
+| Tags                    | common,access-control |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |
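+
+A minimal sketch of a read-only root filesystem with an `emptyDir` mount for scratch space; names and the image are placeholders:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: my-app                  # hypothetical name
+spec:
+  containers:
+    - name: app
+      image: registry.example.com/my-app:1.0   # placeholder image
+      securityContext:
+        readOnlyRootFilesystem: true   # container cannot modify its own filesystem
+      volumeMounts:
+        - name: tmp
+          mountPath: /tmp       # writable scratch space kept off the root fs
+  volumes:
+    - name: tmp
+      emptyDir: {}
+```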
Workloads should avoid accessing host resources. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security | +| Exception Process | Exception for host resource access tests will only be considered in rare cases where it is absolutely needed | +| Impact Statement | NodePort services expose applications directly on host ports, creating security risks and potential port conflicts with host services. | +| Tags | common,access-control | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### access-control-ssh-daemons -|Property|Description| -|---|---| -|Unique ID|access-control-ssh-daemons| -|Description|Check that pods do not run SSH daemons.| -|Suggested Remediation|Ensure that no SSH daemons are running inside a pod. Pods should not run as SSH Daemons (replicaset or statefulset only).| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-interaction-and-configuration| -|Exception Process|No exceptions - special consideration can be given to certain containers which run as utility tool daemon| -|Impact Statement|SSH daemons in containers create additional attack surfaces, violate immutable infrastructure principles, and can be exploited for unauthorized access.| -|Tags|telco,access-control| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Optional| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | access-control-ssh-daemons | +| Description | Check that pods do not run SSH daemons. | +| Suggested Remediation | Ensure that no SSH daemons are running inside a pod. Pods should not run as SSH Daemons (replicaset or statefulset only). | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-interaction-and-configuration | +| Exception Process | No exceptions - special consideration can be given to certain containers which run as a utility tool daemon | +| Impact Statement | SSH daemons in containers create additional attack surfaces, violate immutable infrastructure principles, and can be exploited for unauthorized access. | +| Tags | telco,access-control | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Optional | +| Telco | Mandatory | #### access-control-sys-admin-capability-check -|Property|Description| -|---|---| -|Unique ID|access-control-sys-admin-capability-check| -|Description|Ensures that containers do not use SYS_ADMIN capability| -|Suggested Remediation|Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why. 

Containers should not use the SYS_ADMIN Linux capability.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-sys_admin| -|Exception Process|No exceptions| -|Impact Statement|SYS_ADMIN capability provides extensive privileges that can compromise container isolation, enable host system access, and create serious security vulnerabilities.| -|Tags|common,access-control| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | access-control-sys-admin-capability-check | +| Description | Ensures that containers do not use SYS_ADMIN capability | +| Suggested Remediation | Containers should not use the SYS_ADMIN Linux capability. Exception possible if a workload uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-sys_admin | +| Exception Process | No exceptions | +| Impact Statement | SYS_ADMIN capability provides extensive privileges that can compromise container isolation, enable host system access, and create serious security vulnerabilities. | +| Tags | common,access-control | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### access-control-sys-nice-realtime-capability -|Property|Description| -|---|---| -|Unique ID|access-control-sys-nice-realtime-capability| -|Description|Check that pods running on nodes with realtime kernel enabled have the SYS_NICE capability enabled in their spec. In the case that a workolad is running on a node using the real-time kernel, SYS_NICE will be used to allow DPDK application to switch to SCHED_FIFO.| -|Suggested Remediation|If pods are scheduled to realtime kernel nodes, they must add SYS_NICE capability to their spec.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-sys_nice| -|Exception Process|There is no documented exception process for this.| -|Impact Statement|Missing SYS_NICE capability on real-time nodes prevents applications from setting appropriate scheduling priorities, causing performance degradation.| -|Tags|telco,access-control| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Optional| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | access-control-sys-nice-realtime-capability | +| Description | Check that pods running on nodes with realtime kernel enabled have the SYS_NICE capability enabled in their spec. In the case that a workload is running on a node using the real-time kernel, SYS_NICE will be used to allow DPDK applications to switch to SCHED_FIFO. 

| +| Suggested Remediation | If pods are scheduled to realtime kernel nodes, they must add SYS_NICE capability to their spec. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-sys_nice | +| Exception Process | There is no documented exception process for this. | +| Impact Statement | Missing SYS_NICE capability on real-time nodes prevents applications from setting appropriate scheduling priorities, causing performance degradation. | +| Tags | telco,access-control | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Optional | +| Telco | Mandatory | #### access-control-sys-ptrace-capability -|Property|Description| -|---|---| -|Unique ID|access-control-sys-ptrace-capability| -|Description|Check that if process namespace sharing is enabled for a Pod then the SYS_PTRACE capability is allowed. This capability is required when using Process Namespace Sharing. This is used when processes from one Container need to be exposed to another Container. For example, to send signals like SIGHUP from a process in a Container to another process in another Container. For more information on these capabilities refer to https://cloud.redhat.com/blog/linux-capabilities-in-openshift and https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/| -|Suggested Remediation|Allow the SYS_PTRACE capability when enabling process namespace sharing for a Pod| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-sys_ptrace| -|Exception Process|There is no documented exception process for this.| -|Impact Statement|Missing SYS_PTRACE capability when using shared process namespaces prevents inter-container process communication, breaking application functionality.| -|Tags|telco,access-control| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Optional| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | access-control-sys-ptrace-capability | +| Description | Check that if process namespace sharing is enabled for a Pod then the SYS_PTRACE capability is allowed. This capability is required when using Process Namespace Sharing. This is used when processes from one Container need to be exposed to another Container. For example, to send signals like SIGHUP from a process in a Container to another process in another Container. 
For more information on these capabilities refer to https://cloud.redhat.com/blog/linux-capabilities-in-openshift and https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ | +| Suggested Remediation | Allow the SYS_PTRACE capability when enabling process namespace sharing for a Pod | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-sys_ptrace | +| Exception Process | There is no documented exception process for this. | +| Impact Statement | Missing SYS_PTRACE capability when using shared process namespaces prevents inter-container process communication, breaking application functionality. | +| Tags | telco,access-control | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Optional | +| Telco | Mandatory | ### affiliated-certification #### affiliated-certification-container-is-certified-digest -|Property|Description| -|---|---| -|Unique ID|affiliated-certification-container-is-certified-digest| -|Description|Tests whether container images that are autodiscovered have passed the Red Hat Container Certification Program by their digest(CCP).| -|Suggested Remediation|Ensure that your container has passed the Red Hat Container Certification Program (CCP).| -|Best Practice Reference|https://docs.redhat.com/en/documentation/red_hat_software_certification/2025/html/red_hat_software_certification_workflow_guide/index| -|Exception Process|There is no documented exception process for this. A partner can run the Red Hat Best Practices Test Suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in the Red Hat Best Practices Test Suite must be re-run once the other certifications have been granted.| -|Impact Statement|Uncertified containers may contain security vulnerabilities, lack enterprise support, and fail to meet compliance requirements.| -|Tags|common,affiliated-certification| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | affiliated-certification-container-is-certified-digest | +| Description | Tests whether container images that are autodiscovered have passed the Red Hat Container Certification Program by their digest(CCP). | +| Suggested Remediation | Ensure that your container has passed the Red Hat Container Certification Program (CCP). | +| Best Practice Reference | https://docs.redhat.com/en/documentation/red_hat_software_certification/2025/html/red_hat_software_certification_workflow_guide/index | +| Exception Process | There is no documented exception process for this. A partner can run the Red Hat Best Practices Test Suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in the Red Hat Best Practices Test Suite must be re-run once the other certifications have been granted. | +| Impact Statement | Uncertified containers may contain security vulnerabilities, lack enterprise support, and fail to meet compliance requirements. 
| +| Tags | common,affiliated-certification | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### affiliated-certification-helm-version -|Property|Description| -|---|---| -|Unique ID|affiliated-certification-helm-version| -|Description|Test to check if the helm chart is v3| -|Suggested Remediation|Check Helm Chart is v3 and not v2 which is not supported due to security risks associated with Tiller.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-helm| -|Exception Process|There is no documented exception process for this.| -|Impact Statement|Helm v2 has known security vulnerabilities and lacks proper RBAC controls, creating significant security risks in production environments.| -|Tags|common,affiliated-certification| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| Unique ID | affiliated-certification-helm-version | +| Description | Test to check if the helm chart is v3 | +| Suggested Remediation | Check Helm Chart is v3 and not v2 which is not supported due to security risks associated with Tiller. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-helm | +| Exception Process | There is no documented exception process for this. | +| Impact Statement | Helm v2 has known security vulnerabilities and lacks proper RBAC controls, creating significant security risks in production environments. | +| Tags | common,affiliated-certification | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### affiliated-certification-helmchart-is-certified -|Property|Description| -|---|---| -|Unique ID|affiliated-certification-helmchart-is-certified| -|Description|Tests whether helm charts listed in the cluster passed the Red Hat Helm Certification Program.| -|Suggested Remediation|Ensure that the helm charts under test passed the Red Hat's helm Certification Program (e.g. listed in https://charts.openshift.io/index.yaml).| -|Best Practice Reference|https://docs.redhat.com/en/documentation/red_hat_software_certification/2025/html/red_hat_software_certification_workflow_guide/index| -|Exception Process|There is no documented exception process for this. 
A partner can run the Red Hat Best Practices Test Suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in the Red Hat Best Practices Test Suite must be re-run once the other certifications have been granted.| -|Impact Statement|Uncertified helm charts may contain security vulnerabilities, configuration errors, and lack proper testing, leading to deployment failures.| -|Tags|common,affiliated-certification| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | affiliated-certification-helmchart-is-certified | +| Description | Tests whether helm charts listed in the cluster have passed the Red Hat Helm Certification Program. | +| Suggested Remediation | Ensure that the helm charts under test have passed Red Hat's Helm Certification Program (e.g. listed in https://charts.openshift.io/index.yaml). | +| Best Practice Reference | https://docs.redhat.com/en/documentation/red_hat_software_certification/2025/html/red_hat_software_certification_workflow_guide/index | +| Exception Process | There is no documented exception process for this. A partner can run the Red Hat Best Practices Test Suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in the Red Hat Best Practices Test Suite must be re-run once the other certifications have been granted. | +| Impact Statement | Uncertified helm charts may contain security vulnerabilities, configuration errors, and lack proper testing, leading to deployment failures. | +| Tags | common,affiliated-certification | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### affiliated-certification-operator-is-certified -|Property|Description| -|---|---| -|Unique ID|affiliated-certification-operator-is-certified| -|Description|Tests whether the workload Operators listed in the configuration file have passed the Red Hat Operator Certification Program (OCP).| -|Suggested Remediation|Ensure that your Operator has passed Red Hat's Operator Certification Program (OCP).| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements| -|Exception Process|There is no documented exception process for this. 

A partner can run the Red Hat Best Practices Test Suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in the Red Hat Best Practices Test Suite must be re-run once the other certifications have been granted.| -|Impact Statement|Uncertified operators may have security flaws, compatibility issues, and lack enterprise support, creating operational risks.| -|Tags|common,affiliated-certification| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | affiliated-certification-operator-is-certified | +| Description | Tests whether the workload Operators listed in the configuration file have passed the Red Hat Operator Certification Program (OCP). | +| Suggested Remediation | Ensure that your Operator has passed Red Hat's Operator Certification Program (OCP). | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements | +| Exception Process | There is no documented exception process for this. A partner can run the Red Hat Best Practices Test Suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in the Red Hat Best Practices Test Suite must be re-run once the other certifications have been granted. | +| Impact Statement | Uncertified operators may have security flaws, compatibility issues, and lack enterprise support, creating operational risks. | +| Tags | common,affiliated-certification | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | ### lifecycle #### lifecycle-affinity-required-pods -|Property|Description| -|---|---| -|Unique ID|lifecycle-affinity-required-pods| -|Description|Checks that affinity rules are in place if AffinityRequired: 'true' labels are set on Pods.| -|Suggested Remediation|Pods which need to be co-located on the same node need Affinity rules. If a pod/statefulset/deployment is required to use affinity rules, please add AffinityRequired: 'true' as a label.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations| -|Exception Process|There is no documented exception process for this.| -|Impact Statement|Missing affinity rules can cause incorrect pod placement, leading to performance issues and failure to meet co-location requirements.| -|Tags|telco,lifecycle| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Optional| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | lifecycle-affinity-required-pods | +| Description | Checks that affinity rules are in place if AffinityRequired: 'true' labels are set on Pods. 
| +| Suggested Remediation | Pods which need to be co-located on the same node need Affinity rules. If a pod/statefulset/deployment is required to use affinity rules, please add AffinityRequired: 'true' as a label. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations | +| Exception Process | There is no documented exception process for this. | +| Impact Statement | Missing affinity rules can cause incorrect pod placement, leading to performance issues and failure to meet co-location requirements. | +| Tags | telco,lifecycle | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Optional | +| Telco | Mandatory | #### lifecycle-container-poststart -|Property|Description| -|---|---| -|Unique ID|lifecycle-container-poststart| -|Description|Ensure that the containers lifecycle postStart management feature is configured. A container must receive important events from the platform and conform/react to these events properly. For example, a container should catch SIGTERM or SIGKILL from the platform and shutdown as quickly as possible. Other typically important events from the platform are PostStart to initialize before servicing requests and PreStop to release resources cleanly before shutting down.| -|Suggested Remediation|PostStart is normally used to configure the container, set up dependencies, and record the new creation. You could use this event to check that a required API is available before the container’s main work begins. Kubernetes will not change the container’s state to Running until the PostStart script has executed successfully. For details, see https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. PostStart is used to configure container, set up dependencies, record new creation. It can also be used to check that a required API is available before the container’s work begins.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cloud-native-design-best-practices| -|Exception Process|Identify which pod is not conforming to the process and submit information as to why it cannot use a postStart startup specification.| -|Impact Statement|Missing PostStart hooks can cause containers to start serving traffic before proper initialization, leading to application errors.| -|Tags|telco,lifecycle| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Optional| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | lifecycle-container-poststart | +| Description | Ensure that the containers lifecycle postStart management feature is configured. A container must receive important events from the platform and conform/react to these events properly. 
For example, a container should catch SIGTERM or SIGKILL from the platform and shut down as quickly as possible. Other typically important events from the platform are PostStart to initialize before servicing requests and PreStop to release resources cleanly before shutting down. | +| Suggested Remediation | PostStart is normally used to configure the container, set up dependencies, and record the new creation. You could use this event to check that a required API is available before the container’s main work begins. Kubernetes will not change the container’s state to Running until the PostStart script has executed successfully. For details, see https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cloud-native-design-best-practices | +| Exception Process | Identify which pod is not conforming to the process and submit information as to why it cannot use a postStart startup specification. | +| Impact Statement | Missing PostStart hooks can cause containers to start serving traffic before proper initialization, leading to application errors. | +| Tags | telco,lifecycle | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Optional | +| Telco | Mandatory | #### lifecycle-container-prestop -|Property|Description| -|---|---| -|Unique ID|lifecycle-container-prestop| -|Description|Ensure that the containers lifecycle preStop management feature is configured. The most basic requirement for the lifecycle management of Pods in OpenShift are the ability to start and stop correctly. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. When pods are shut down by the platform they are sent a SIGTERM signal which means that the process in the container should start shutting down, closing connections and stopping all activity. If the pod doesn’t shut down within the default 30 seconds then the platform may send a SIGKILL signal which will stop the pod immediately. This method isn’t as clean and the default time between the SIGTERM and SIGKILL messages can be modified based on the requirements of the application. Containers should respond to SIGTERM/SIGKILL with graceful shutdown.| -|Suggested Remediation|The preStop can be used to gracefully stop the container and clean resources (e.g., DB connection). For details, see https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. 

All pods must respond to SIGTERM signal and shutdown gracefully with a zero exit code.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cloud-native-design-best-practices| -|Exception Process|Identify which pod is not conforming to the process and submit information as to why it cannot use a preStop shutdown specification.| -|Impact Statement|Missing PreStop hooks can cause ungraceful shutdowns, data loss, and connection drops during container termination.| -|Tags|telco,lifecycle| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Optional| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | lifecycle-container-prestop | +| Description | Ensure that the containers lifecycle preStop management feature is configured. The most basic requirement for the lifecycle management of Pods in OpenShift is the ability to start and stop correctly. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. When pods are shut down by the platform they are sent a SIGTERM signal which means that the process in the container should start shutting down, closing connections and stopping all activity. If the pod doesn’t shut down within the default 30 seconds then the platform may send a SIGKILL signal which will stop the pod immediately. This method isn’t as clean and the default time between the SIGTERM and SIGKILL messages can be modified based on the requirements of the application. Containers should respond to SIGTERM/SIGKILL with graceful shutdown. | +| Suggested Remediation | The preStop hook can be used to gracefully stop the container and clean up resources (e.g., DB connections). For details, see https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. All pods must respond to the SIGTERM signal and shut down gracefully with a zero exit code. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cloud-native-design-best-practices | +| Exception Process | Identify which pod is not conforming to the process and submit information as to why it cannot use a preStop shutdown specification. | +| Impact Statement | Missing PreStop hooks can cause ungraceful shutdowns, data loss, and connection drops during container termination. 

| +| Tags | telco,lifecycle | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Optional | +| Telco | Mandatory | #### lifecycle-cpu-isolation -|Property|Description| -|---|---| -|Unique ID|lifecycle-cpu-isolation| -|Description|CPU isolation requires: For each container within the pod, resource requests and limits must be identical. If cpu requests and limits are not identical and in whole units (Guaranteed pods with exclusive cpus), your pods will not be tested for compliance. The runTimeClassName must be specified. Annotations required disabling CPU and IRQ load-balancing.| -|Suggested Remediation|CPU isolation testing is enabled. Please ensure that all pods adhere to the CPU isolation requirements.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cpu-isolation| -|Exception Process|There is no documented exception process for this.| -|Impact Statement|Improper CPU isolation can cause performance interference between workloads and fail to provide guaranteed compute resources.| -|Tags|telco,lifecycle| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Optional| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | lifecycle-cpu-isolation | +| Description | CPU isolation requires: For each container within the pod, resource requests and limits must be identical. If CPU requests and limits are not identical and in whole units (Guaranteed pods with exclusive CPUs), your pods will not be tested for compliance. The runtimeClassName must be specified. Annotations disabling CPU and IRQ load-balancing are required. | +| Suggested Remediation | CPU isolation testing is enabled. Please ensure that all pods adhere to the CPU isolation requirements. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cpu-isolation | +| Exception Process | There is no documented exception process for this. | +| Impact Statement | Improper CPU isolation can cause performance interference between workloads and fail to provide guaranteed compute resources. | +| Tags | telco,lifecycle | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Optional | +| Telco | Mandatory | #### lifecycle-crd-scaling -|Property|Description| -|---|---| -|Unique ID|lifecycle-crd-scaling| -|Description|Tests that a workload's CRD support scale in/out operations. First, the test starts getting the current replicaCount (N) of the crd/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the crd/s. In case of crd that are managed by HPA the test is changing the min and max value to crd Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. 

Lastly its restoring the original min/max replica of the crd/s| -|Suggested Remediation|Ensure the workload's CRDs can scale in/out successfully.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations| -|Exception Process|There is no documented exception process for this. Not applicable to SNO applications.| -|Impact Statement|CRD scaling failures can prevent operator-managed applications from scaling properly, limiting application availability and performance.| -|Tags|common,lifecycle| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Optional| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Unique ID | lifecycle-crd-scaling | +| Description | Tests that a workload's CRDs support scale in/out operations. First, the test gets the current replicaCount (N) of the crd/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the crd/s. In case of CRDs that are managed by HPA, the test changes the min and max value to the CRD Replica - 1 during scale-in and back to the original replicaCount for both min/max during the scale-out stage. Finally, it restores the original min/max replicas of the crd/s | +| Suggested Remediation | Ensure the workload's CRDs can scale in/out successfully. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations | +| Exception Process | There is no documented exception process for this. Not applicable to SNO applications. | +| Impact Statement | CRD scaling failures can prevent operator-managed applications from scaling properly, limiting application availability and performance. | +| Tags | common,lifecycle | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Optional | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### lifecycle-deployment-scaling -|Property|Description| -|---|---| -|Unique ID|lifecycle-deployment-scaling| -|Description|Tests that workload deployments support scale in/out operations. First, the test starts getting the current replicaCount (N) of the deployment/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the deployment/s. In case of deployments that are managed by HPA the test is changing the min and max value to deployment Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. 

Lastly its restoring the original min/max replica of the deployment/s| -|Suggested Remediation|Ensure the workload's deployments/replica sets can scale in/out successfully.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations| -|Exception Process|There is no documented exception process for this. Not applicable to SNO applications.| -|Impact Statement|Deployment scaling failures prevent horizontal scaling operations, limiting application elasticity and availability during high load.| -|Tags|common,lifecycle| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Optional| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | lifecycle-deployment-scaling | +| Description | Tests that workload deployments support scale in/out operations. First, the test gets the current replicaCount (N) of the deployment/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the deployment/s. In case of deployments that are managed by HPA, the test changes the min and max value to the deployment Replica - 1 during scale-in and back to the original replicaCount for both min/max during the scale-out stage. Finally, it restores the original min/max replicas of the deployment/s | +| Suggested Remediation | Ensure the workload's deployments/replica sets can scale in/out successfully. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations | +| Exception Process | There is no documented exception process for this. Not applicable to SNO applications. | +| Impact Statement | Deployment scaling failures prevent horizontal scaling operations, limiting application elasticity and availability during high load. | +| Tags | common,lifecycle | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Optional | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### lifecycle-image-pull-policy -|Property|Description| -|---|---| -|Unique ID|lifecycle-image-pull-policy| -|Description|Ensure that the containers under test are using IfNotPresent as Image Pull Policy. If there is a situation where the container dies and needs to be restarted, the image pull policy becomes important. 

PullIfNotPresent is recommended so that a loss of image registry access does not prevent the pod from restarting.| -|Suggested Remediation|Ensure that the containers under test are using IfNotPresent as Image Pull Policy.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-use-imagepullpolicy:-ifnotpresent| -|Exception Process|There is no documented exception process for this.| -|Impact Statement|Incorrect image pull policies can cause deployment failures when image registries are unavailable or during network issues.| -|Tags|telco,lifecycle| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Optional| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Unique ID | lifecycle-image-pull-policy | +| Description | Ensure that the containers under test are using IfNotPresent as Image Pull Policy. If there is a situation where the container dies and needs to be restarted, the image pull policy becomes important. IfNotPresent is recommended so that a loss of image registry access does not prevent the pod from restarting. | +| Suggested Remediation | Ensure that the containers under test are using IfNotPresent as Image Pull Policy. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-use-imagepullpolicy:-ifnotpresent | +| Exception Process | There is no documented exception process for this. | +| Impact Statement | Incorrect image pull policies can cause deployment failures when image registries are unavailable or during network issues. | +| Tags | telco,lifecycle | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Optional | +| Telco | Mandatory | #### lifecycle-liveness-probe -|Property|Description| -|---|---| -|Unique ID|lifecycle-liveness-probe| -|Description|Check that all containers under test have liveness probe defined. The most basic requirement for the lifecycle management of Pods in OpenShift are the ability to start and stop correctly. When starting up, health probes like liveness and readiness checks can be put into place to ensure the application is functioning properly.| -|Suggested Remediation|Add a liveness probe to deployed containers. workloads shall self-recover from common failures like pod failure, host failure, and network failure. 

Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-liveness-readiness-and-startup-probes| -|Exception Process|There is no documented exception process for this.| -|Impact Statement|Missing liveness probes prevent Kubernetes from detecting and recovering from application deadlocks and hangs.| -|Tags|telco,lifecycle| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Optional| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | lifecycle-liveness-probe | +| Description | Check that all containers under test have liveness probe defined. The most basic requirement for the lifecycle management of Pods in OpenShift is the ability to start and stop correctly. When starting up, health probes like liveness and readiness checks can be put into place to ensure the application is functioning properly. | +| Suggested Remediation | Add a liveness probe to deployed containers. Workloads shall self-recover from common failures like pod failure, host failure, and network failure. Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-liveness-readiness-and-startup-probes | +| Exception Process | There is no documented exception process for this. | +| Impact Statement | Missing liveness probes prevent Kubernetes from detecting and recovering from application deadlocks and hangs. | +| Tags | telco,lifecycle | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Optional | +| Telco | Mandatory | #### lifecycle-persistent-volume-reclaim-policy -|Property|Description| -|---|---| -|Unique ID|lifecycle-persistent-volume-reclaim-policy| -|Description|Check that the persistent volumes the workloads pods are using have a reclaim policy of delete. 

Network Functions should clear persistent storage by deleting their PVs when removing their application from a cluster.| -|Suggested Remediation|Ensure that all persistent volumes are using the reclaim policy: delete| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-csi| -|Exception Process|There is no documented exception process for this.| -|Impact Statement|Incorrect reclaim policies can lead to data persistence after application removal, causing storage waste and potential data security issues.| -|Tags|telco,lifecycle| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Optional| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | lifecycle-persistent-volume-reclaim-policy | +| Description | Check that the persistent volumes the workload's pods are using have a reclaim policy of delete. Network Functions should clear persistent storage by deleting their PVs when removing their application from a cluster. | +| Suggested Remediation | Ensure that all persistent volumes are using the reclaim policy: delete | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-csi | +| Exception Process | There is no documented exception process for this. | +| Impact Statement | Incorrect reclaim policies can lead to data persistence after application removal, causing storage waste and potential data security issues. | +| Tags | telco,lifecycle | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Optional | +| Telco | Mandatory | #### lifecycle-pod-high-availability -|Property|Description| -|---|---| -|Unique ID|lifecycle-pod-high-availability| -|Description|Ensures that workloads Pods specify podAntiAffinity rules and replica value is set to more than 1.| -|Suggested Remediation|In high availability cases, Pod podAntiAffinity rule should be specified for pod scheduling and pod replica value is set to more than 1 .| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations| -|Exception Process|There is no documented exception process for this. Not applicable to SNO applications.| -|Impact Statement|Missing anti-affinity rules can cause all pod replicas to be scheduled on the same node, creating single points of failure.| -|Tags|common,lifecycle| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Optional| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | lifecycle-pod-high-availability | +| Description | Ensures that the workload's Pods specify podAntiAffinity rules and that the replica value is set to more than 1. | +| Suggested Remediation | In high availability cases, a Pod podAntiAffinity rule should be specified for pod scheduling and the pod replica value set to more than 1. 

| +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations | +| Exception Process | There is no documented exception process for this. Not applicable to SNO applications. | +| Impact Statement | Missing anti-affinity rules can cause all pod replicas to be scheduled on the same node, creating single points of failure. | +| Tags | common,lifecycle | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Optional | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### lifecycle-pod-owner-type -|Property|Description| -|---|---| -|Unique ID|lifecycle-pod-owner-type| -|Description|Tests that the workload Pods are deployed as part of a ReplicaSet(s)/StatefulSet(s).| -|Suggested Remediation|Deploy the workload using ReplicaSet/StatefulSet.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-no-naked-pods| -|Exception Process|There is no documented exception process for this. Pods should not be deployed as DaemonSet or naked pods.| -|Impact Statement|Naked pods and DaemonSets lack proper lifecycle management, making updates, scaling, and recovery operations difficult or impossible.| -|Tags|telco,lifecycle| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Optional| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | lifecycle-pod-owner-type | +| Description | Tests that the workload Pods are deployed as part of a ReplicaSet(s)/StatefulSet(s). | +| Suggested Remediation | Deploy the workload using ReplicaSet/StatefulSet. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-no-naked-pods | +| Exception Process | There is no documented exception process for this. Pods should not be deployed as DaemonSet or naked pods. | +| Impact Statement | Naked pods and DaemonSets lack proper lifecycle management, making updates, scaling, and recovery operations difficult or impossible. | +| Tags | telco,lifecycle | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Optional | +| Telco | Mandatory | #### lifecycle-pod-recreation -|Property|Description| -|---|---| -|Unique ID|lifecycle-pod-recreation| -|Description|Tests that a workload is configured to support High Availability. First, this test cordons and drains a Node that hosts the workload Pod. Next, the test ensures that OpenShift can re-instantiate the Pod on another Node, and that the actual replica count matches the desired replica count.| -|Suggested Remediation|Ensure that the workloads Pods utilize a configuration that supports High Availability. 
Additionally, ensure that there are available Nodes in the OpenShift cluster that can be utilized in the event that a host Node fails.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-upgrade-expectations| -|Exception Process|No exceptions - workloads should be able to be restarted/recreated.| -|Impact Statement|Failed pod recreation indicates poor high availability configuration, leading to potential service outages during node failures.| -|Tags|common,lifecycle| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Unique ID | lifecycle-pod-recreation | +| Description | Tests that a workload is configured to support High Availability. First, this test cordons and drains a Node that hosts the workload Pod. Next, the test ensures that OpenShift can re-instantiate the Pod on another Node, and that the actual replica count matches the desired replica count. | +| Suggested Remediation | Ensure that the workload's Pods utilize a configuration that supports High Availability. Additionally, ensure that there are available Nodes in the OpenShift cluster that can be utilized in the event that a host Node fails. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-upgrade-expectations | +| Exception Process | No exceptions - workloads should be able to be restarted/recreated. | +| Impact Statement | Failed pod recreation indicates poor high availability configuration, leading to potential service outages during node failures. | +| Tags | common,lifecycle | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### lifecycle-pod-scheduling -|Property|Description| -|---|---| -|Unique ID|lifecycle-pod-scheduling| -|Description|Ensures that workload Pods do not specify nodeSelector or nodeAffinity. In most cases, Pods should allow for instantiation on any underlying Node. Workloads shall not use node selectors nor taints/tolerations to assign pod location.| -|Suggested Remediation|In most cases, Pod's should not specify their host Nodes through nodeSelector or nodeAffinity. However, there are cases in which workloads require specialized hardware specific to a particular class of Node.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations| -|Exception Process|Exception will only be considered if application requires specialized hardware. 

Must specify which container requires special hardware and why.| -|Impact Statement|Node selectors can create scheduling constraints that reduce cluster flexibility and cause deployment failures when nodes are unavailable.| -|Tags|telco,lifecycle| -|**Scenario**|**Optional/Mandatory**| -|Extended|Optional| -|Far-Edge|Optional| -|Non-Telco|Mandatory| -|Telco|Optional| +| Property | Description | +| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | lifecycle-pod-scheduling | +| Description | Ensures that workload Pods do not specify nodeSelector or nodeAffinity. In most cases, Pods should allow for instantiation on any underlying Node. Workloads shall not use node selectors nor taints/tolerations to assign pod location. | +| Suggested Remediation | In most cases, Pods should not specify their host Nodes through nodeSelector or nodeAffinity. However, there are cases in which workloads require specialized hardware specific to a particular class of Node. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations | +| Exception Process | Exception will only be considered if the application requires specialized hardware. Must specify which container requires special hardware and why. | +| Impact Statement | Node selectors can create scheduling constraints that reduce cluster flexibility and cause deployment failures when nodes are unavailable. | +| Tags | telco,lifecycle | +| **Scenario** | **Optional/Mandatory** | +| Extended | Optional | +| Far-Edge | Optional | +| Non-Telco | Mandatory | +| Telco | Optional | #### lifecycle-pod-toleration-bypass -|Property|Description| -|---|---| -|Unique ID|lifecycle-pod-toleration-bypass| -|Description|Check that pods do not have NoExecute, PreferNoSchedule, or NoSchedule tolerations that have been modified from the default.| -|Suggested Remediation|Do not allow pods to bypass the NoExecute, PreferNoSchedule, or NoSchedule tolerations that are default applied by Kubernetes.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cpu-manager-pinning| -|Exception Process|There is no documented exception process for this.| -|Impact Statement|Modified tolerations can allow pods to be scheduled on inappropriate nodes, violating scheduling policies and causing performance issues.| -|Tags|telco,lifecycle| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Optional| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | lifecycle-pod-toleration-bypass | +| Description | Check that pods do not have NoExecute, PreferNoSchedule, or NoSchedule tolerations that have been modified from the default. | +| Suggested Remediation | Do not allow pods to bypass the NoExecute, PreferNoSchedule, or NoSchedule tolerations that are applied by Kubernetes by default. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cpu-manager-pinning | +| Exception Process | There is no documented exception process for this. 

#### lifecycle-readiness-probe

-|Property|Description|
-|---|---|
-|Unique ID|lifecycle-readiness-probe|
-|Description|Check that all containers under test have readiness probe defined. There are different ways a pod can stop on on OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem.|
-|Suggested Remediation|Add a readiness probe to deployed containers|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-liveness-readiness-and-startup-probes|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Missing readiness probes can cause traffic to be routed to non-ready pods, resulting in failed requests and poor user experience.|
-|Tags|telco,lifecycle|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | lifecycle-readiness-probe |
+| Description             | Check that all containers under test have a readiness probe defined. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem. |
+| Suggested Remediation   | Add a readiness probe to deployed containers |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-liveness-readiness-and-startup-probes |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Missing readiness probes can cause traffic to be routed to non-ready pods, resulting in failed requests and poor user experience. |
+| Tags                    | telco,lifecycle |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Mandatory |

#### lifecycle-startup-probe

-|Property|Description|
-|---|---|
-|Unique ID|lifecycle-startup-probe|
-|Description|Check that all containers under test have startup probe defined. Workloads shall self-recover from common failures like pod failure, host failure, and network failure. Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum.|
-|Suggested Remediation|Add a startup probe to deployed containers|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-liveness-readiness-and-startup-probes|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Missing startup probes can cause slow-starting applications to be killed prematurely, preventing successful application startup.|
-|Tags|telco,lifecycle|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | lifecycle-startup-probe |
+| Description             | Check that all containers under test have a startup probe defined. Workloads shall self-recover from common failures like pod failure, host failure, and network failure. Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum. |
+| Suggested Remediation   | Add a startup probe to deployed containers |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-liveness-readiness-and-startup-probes |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Missing startup probes can cause slow-starting applications to be killed prematurely, preventing successful application startup. |
+| Tags                    | telco,lifecycle |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Mandatory |
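
+As an illustrative sketch (endpoints, ports, and timings are placeholders, not values mandated by the catalog), a container spec carrying the readiness and startup probes that the two checks above look for might be built as follows.
+
+```python
+import json
+
+# Hypothetical container fragment with readiness and startup probes.
+container = {
+    "name": "app",
+    "image": "registry.example.com/demo-app:1.0.0",
+    "readinessProbe": {
+        "httpGet": {"path": "/readyz", "port": 8080},
+        "periodSeconds": 10,
+    },
+    "startupProbe": {
+        "httpGet": {"path": "/healthz", "port": 8080},
+        "periodSeconds": 10,
+        "failureThreshold": 30,  # allows up to 30 probe periods for slow starts
+    },
+}
+print(json.dumps(container, indent=2))
+```
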
#### lifecycle-statefulset-scaling

-|Property|Description|
-|---|---|
-|Unique ID|lifecycle-statefulset-scaling|
-|Description|Tests that workload statefulsets support scale in/out operations. First, the test starts getting the current replicaCount (N) of the statefulset/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the statefulset/s. In case of statefulsets that are managed by HPA the test is changing the min and max value to statefulset Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the statefulset/s|
-|Suggested Remediation|Ensure the workload's statefulsets/replica sets can scale in/out successfully.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations|
-|Exception Process|There is no documented exception process for this. 
Not applicable to SNO applications.|
-|Impact Statement|StatefulSet scaling issues can prevent proper data persistence and ordered deployment of stateful applications.|
-|Tags|common,lifecycle|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Optional|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | lifecycle-statefulset-scaling |
+| Description             | Tests that workload statefulsets support scale in/out operations. First, the test gets the current replicaCount (N) of the statefulset(s) with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Next, it executes the scale-out oc command, restoring the original replicaCount of the statefulset(s). For statefulsets that are managed by an HPA, the test changes the min and max values to the statefulset replica count - 1 during scale-in and back to the original replicaCount for both min/max during the scale-out stage. Lastly, it restores the original min/max replicas of the statefulset(s). |
+| Suggested Remediation   | Ensure the workload's statefulsets/replica sets can scale in/out successfully. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations |
+| Exception Process       | There is no documented exception process for this. Not applicable to SNO applications. |
+| Impact Statement        | StatefulSet scaling issues can prevent proper data persistence and ordered deployment of stateful applications. |
+| Tags                    | common,lifecycle |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Optional |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |
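
+A minimal sketch of the scale-in/scale-out round trip this check performs, written against the kubernetes Python client; the statefulset name, namespace, and an authenticated cluster context are assumptions made for illustration.
+
+```python
+from kubernetes import client, config
+
+config.load_kube_config()
+apps = client.AppsV1Api()
+
+# Read the current replica count (N) of a hypothetical statefulset.
+scale = apps.read_namespaced_stateful_set_scale("demo-db", "demo-ns")
+original = scale.spec.replicas
+
+# Scale in to N-1 replicas, then restore the original count.
+for target in (original - 1, original):
+    apps.patch_namespaced_stateful_set_scale(
+        "demo-db", "demo-ns", {"spec": {"replicas": target}})
+```
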
#### lifecycle-storage-provisioner

-|Property|Description|
-|---|---|
-|Unique ID|lifecycle-storage-provisioner|
-|Description|Checks that pods do not place persistent volumes on local storage in multinode clusters. Local storage is recommended for single node clusters, but only one type of local storage should be installed (lvms or noprovisioner).|
-|Suggested Remediation|Use a non-local storage (e.g. no kubernetes.io/no-provisioner and no topolvm.io provisioners) in multinode clusters. 
Local storage are recommended for single node clusters only, but a single local provisioner should be installed.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-local-storage|
-|Exception Process|No exceptions|
-|Impact Statement|Inappropriate storage provisioners can cause data persistence issues, performance problems, and storage failures.|
-|Tags|common,lifecycle|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | lifecycle-storage-provisioner |
+| Description             | Checks that pods do not place persistent volumes on local storage in multinode clusters. Local storage is recommended for single node clusters, but only one type of local storage should be installed (lvms or noprovisioner). |
+| Suggested Remediation   | Use non-local storage (e.g. no kubernetes.io/no-provisioner and no topolvm.io provisioners) in multinode clusters. Local storage is recommended for single node clusters only, and only a single local provisioner should be installed. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-local-storage |
+| Exception Process       | No exceptions |
+| Impact Statement        | Inappropriate storage provisioners can cause data persistence issues, performance problems, and storage failures. |
+| Tags                    | common,lifecycle |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |

### manageability

#### manageability-container-port-name-format

-|Property|Description|
-|---|---|
-|Unique ID|manageability-container-port-name-format|
-|Description|Check that the container's ports name follow the naming conventions. Name field in ContainerPort section must be of form `[-]`. More naming convention requirements may be released in future|
-|Suggested Remediation|Ensure that the container's ports name follow our partner naming conventions|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs|
-|Exception Process|No exception needed for optional/extended tests.|
-|Impact Statement|Incorrect port naming conventions can cause service discovery issues and configuration management problems.|
-|Tags|extended,manageability|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | manageability-container-port-name-format |
+| Description             | Check that the container's port names follow the naming conventions. The name field in the ContainerPort section must be of the form `<protocol>[-<suffix>]`. More naming convention requirements may be released in future. |
+| Suggested Remediation   | Ensure that the container's port names follow our partner naming conventions |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs |
+| Exception Process       | No exception needed for optional/extended tests. |
+| Impact Statement        | Incorrect port naming conventions can cause service discovery issues and configuration management problems. |
+| Tags                    | extended,manageability |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### manageability-containers-image-tag

-|Property|Description|
-|---|---|
-|Unique ID|manageability-containers-image-tag|
-|Description|Check that image tag exists on containers.|
-|Suggested Remediation|Ensure that all the container images are tagged. Checks containers have image tags (e.g. latest, stable, dev).|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-image-tagging|
-|Exception Process|No exception needed for optional/extended tests.|
-|Impact Statement|Missing image tags make it difficult to track versions, perform rollbacks, and maintain deployment consistency.|
-|Tags|extended,manageability|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | manageability-containers-image-tag |
+| Description             | Check that an image tag exists on containers. |
+| Suggested Remediation   | Ensure that all the container images are tagged. The check verifies that containers have image tags (e.g. latest, stable, dev). |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-image-tagging |
+| Exception Process       | No exception needed for optional/extended tests. |
+| Impact Statement        | Missing image tags make it difficult to track versions, perform rollbacks, and maintain deployment consistency. 
| +| Tags | extended,manageability | +| **Scenario** | **Optional/Mandatory** | +| Extended | Optional | +| Far-Edge | Optional | +| Non-Telco | Optional | +| Telco | Optional | ### networking -#### networking-dpdk-cpu-pinning-exec-probe - -|Property|Description| -|---|---| -|Unique ID|networking-dpdk-cpu-pinning-exec-probe| -|Description|If a workload is doing CPU pinning, exec probes may not be used.| -|Suggested Remediation|If the workload is doing CPU pinning and running a DPDK process do not use exec probes (executing a command within the container) as it may pile up and block the node eventually.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cpu-manager-pinning| -|Exception Process|There is no documented exception process for this.| -|Impact Statement|Exec probes on CPU-pinned DPDK workloads can cause performance degradation, interrupt real-time operations, and potentially crash applications due to resource contention.| -|Tags|telco,networking| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Optional| -|Telco|Mandatory| - #### networking-dual-stack-service -|Property|Description| -|---|---| -|Unique ID|networking-dual-stack-service| -|Description|Checks that all services in namespaces under test are either ipv6 single stack or dual stack. This test case requires the deployment of the probe daemonset.| -|Suggested Remediation|Configure every workload service with either a single stack ipv6 or dual stack (ipv4/ipv6) load balancer.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-&-ipv6| -|Exception Process|No exception needed for optional/extended tests.| -|Impact Statement|Single-stack IPv4 services limit network architecture flexibility and prevent migration to modern dual-stack infrastructures.| -|Tags|extended,networking| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Optional| -|Non-Telco|Optional| -|Telco|Optional| +| Property | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Unique ID | networking-dual-stack-service | +| Description | Checks that all services in namespaces under test are either ipv6 single stack or dual stack. This test case requires the deployment of the probe daemonset. | +| Suggested Remediation | Configure every workload service with either a single stack ipv6 or dual stack (ipv4/ipv6) load balancer. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-&-ipv6 | +| Exception Process | No exception needed for optional/extended tests. | +| Impact Statement | Single-stack IPv4 services limit network architecture flexibility and prevent migration to modern dual-stack infrastructures. | +| Tags | extended,networking | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Optional | +| Non-Telco | Optional | +| Telco | Optional | #### networking-icmpv4-connectivity -|Property|Description| -|---|---| -|Unique ID|networking-icmpv4-connectivity| -|Description|Checks that each workload Container is able to communicate via ICMPv4 on the Default OpenShift network. This test case requires the Deployment of the probe daemonset and at least 2 pods connected to each network under test(one source and one destination). 
If no network with more than 2 pods exists this test will be skipped.|
-|Suggested Remediation|Ensure that the workload is able to communicate via the Default OpenShift network. In some rare cases, workloads may require routing table changes in order to communicate over the Default network. To exclude a particular pod from ICMPv4 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-&-ipv6|
-|Exception Process|No exceptions - must be able to communicate on default network using IPv4|
-|Impact Statement|Failure indicates potential network isolation issues that could prevent workload components from communicating, leading to service degradation or complete application failure.|
-|Tags|common,networking|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | networking-icmpv4-connectivity |
+| Description             | Checks that each workload Container is able to communicate via ICMPv4 on the Default OpenShift network. This test case requires the deployment of the probe daemonset and at least 2 pods connected to each network under test (one source and one destination). If no network with more than 2 pods exists this test will be skipped. |
+| Suggested Remediation   | Ensure that the workload is able to communicate via the Default OpenShift network. In some rare cases, workloads may require routing table changes in order to communicate over the Default network. To exclude a particular pod from ICMPv4 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-&-ipv6 |
+| Exception Process       | No exceptions - must be able to communicate on default network using IPv4 |
+| Impact Statement        | Failure indicates potential network isolation issues that could prevent workload components from communicating, leading to service degradation or complete application failure. |
+| Tags                    | common,networking |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |
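
+The remediation above mentions an opt-out label for the connectivity checks. A hedged sketch of applying it with the kubernetes Python client follows; the pod and namespace names are placeholders, and the label value is arbitrary since only its presence matters.
+
+```python
+from kubernetes import client, config
+
+config.load_kube_config()
+core = client.CoreV1Api()
+
+# Label a pod so it is excluded from the ICMP connectivity tests.
+core.patch_namespaced_pod(
+    "demo-pod", "demo-ns",
+    {"metadata": {"labels": {
+        "redhat-best-practices-for-k8s.com/skip_connectivity_tests": "true"}}})
+```
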
#### networking-icmpv4-connectivity-multus

-|Property|Description|
-|---|---|
-|Unique ID|networking-icmpv4-connectivity-multus|
-|Description|Checks that each workload Container is able to communicate via ICMPv4 on the Multus network(s). This test case requires the Deployment of the probe daemonset and at least 2 pods connected to each network under test(one source and one destination). If no network with more than 2 pods exists this test will be skipped.|
-|Suggested Remediation|Ensure that the workload is able to communicate via the Multus network(s). In some rare cases, workloads may require routing table changes in order to communicate over the Multus network(s). 
To exclude a particular pod from ICMPv4 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. Not applicable if MULTUS is not supported.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Multus network connectivity issues can isolate workloads from secondary networks, breaking multi-network applications and reducing network redundancy.|
-|Tags|telco,networking|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | networking-icmpv4-connectivity-multus |
+| Description             | Checks that each workload Container is able to communicate via ICMPv4 on the Multus network(s). This test case requires the deployment of the probe daemonset and at least 2 pods connected to each network under test (one source and one destination). If no network with more than 2 pods exists this test will be skipped. |
+| Suggested Remediation   | Ensure that the workload is able to communicate via the Multus network(s). In some rare cases, workloads may require routing table changes in order to communicate over the Multus network(s). To exclude a particular pod from ICMPv4 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. Not applicable if MULTUS is not supported. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Multus network connectivity issues can isolate workloads from secondary networks, breaking multi-network applications and reducing network redundancy. |
+| Tags                    | telco,networking |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Mandatory |

#### networking-icmpv6-connectivity

-|Property|Description|
-|---|---|
-|Unique ID|networking-icmpv6-connectivity|
-|Description|Checks that each workload Container is able to communicate via ICMPv6 on the Default OpenShift network. This test case requires the Deployment of the probe daemonset and at least 2 pods connected to each network under test(one source and one destination). If no network with more than 2 pods exists this test will be skipped.|
-|Suggested Remediation|Ensure that the workload is able to communicate via the Default OpenShift network. In some rare cases, workloads may require routing table changes in order to communicate over the Default network. To exclude a particular pod from ICMPv6 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. 
Not applicable if IPv6 is not supported.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-&-ipv6|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|IPv6 connectivity failures can prevent dual-stack applications from functioning properly and limit future network architecture flexibility.|
-|Tags|common,networking|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | networking-icmpv6-connectivity |
+| Description             | Checks that each workload Container is able to communicate via ICMPv6 on the Default OpenShift network. This test case requires the deployment of the probe daemonset and at least 2 pods connected to each network under test (one source and one destination). If no network with more than 2 pods exists this test will be skipped. |
+| Suggested Remediation   | Ensure that the workload is able to communicate via the Default OpenShift network. In some rare cases, workloads may require routing table changes in order to communicate over the Default network. To exclude a particular pod from ICMPv6 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. Not applicable if IPv6 is not supported. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-&-ipv6 |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | IPv6 connectivity failures can prevent dual-stack applications from functioning properly and limit future network architecture flexibility. |
+| Tags                    | common,networking |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Mandatory |

#### networking-icmpv6-connectivity-multus

-|Property|Description|
-|---|---|
-|Unique ID|networking-icmpv6-connectivity-multus|
-|Description|Checks that each workload Container is able to communicate via ICMPv6 on the Multus network(s). This test case requires the Deployment of the probe daemonset and at least 2 pods connected to each network under test(one source and one destination). If no network with more than 2 pods exists this test will be skipped.|
-|Suggested Remediation|Ensure that the workload is able to communicate via the Multus network(s). In some rare cases, workloads may require routing table changes in order to communicate over the Multus network(s). To exclude a particular pod from ICMPv6 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it.The label value is trivial, only its presence. 
Not applicable if IPv6/MULTUS is not supported.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|IPv6 Multus connectivity problems can prevent dual-stack multi-network scenarios from working, limiting network scalability and future-proofing.|
-|Tags|telco,networking|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | networking-icmpv6-connectivity-multus |
+| Description             | Checks that each workload Container is able to communicate via ICMPv6 on the Multus network(s). This test case requires the deployment of the probe daemonset and at least 2 pods connected to each network under test (one source and one destination). If no network with more than 2 pods exists this test will be skipped. |
+| Suggested Remediation   | Ensure that the workload is able to communicate via the Multus network(s). In some rare cases, workloads may require routing table changes in order to communicate over the Multus network(s). To exclude a particular pod from ICMPv6 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. Not applicable if IPv6/MULTUS is not supported. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | IPv6 Multus connectivity problems can prevent dual-stack multi-network scenarios from working, limiting network scalability and future-proofing. 
| +| Tags | telco,networking | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Optional | +| Telco | Mandatory | #### networking-network-attachment-definition-sriov-mtu -|Property|Description| -|---|---| -|Unique ID|networking-network-attachment-definition-sriov-mtu| -|Description|Ensures that MTU values are set correctly in NetworkAttachmentDefinitions for SRIOV network interfaces.| -|Suggested Remediation|Ensure that the MTU of the SR-IOV network attachment definition is set explicitly.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-multus-sr-iov---macvlan| -|Exception Process|There is no documented exception process for this.| -|Impact Statement|Incorrect MTU settings can cause packet fragmentation, network performance issues, and connectivity failures in high-performance networking scenarios.| -|Tags|faredge,networking| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Unique ID | networking-network-attachment-definition-sriov-mtu | +| Description | Ensures that MTU values are set correctly in NetworkAttachmentDefinitions for SRIOV network interfaces. | +| Suggested Remediation | Ensure that the MTU of the SR-IOV network attachment definition is set explicitly. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-multus-sr-iov---macvlan | +| Exception Process | There is no documented exception process for this. | +| Impact Statement | Incorrect MTU settings can cause packet fragmentation, network performance issues, and connectivity failures in high-performance networking scenarios. | +| Tags | faredge,networking | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### networking-network-policy-deny-all -|Property|Description| -|---|---| -|Unique ID|networking-network-policy-deny-all| -|Description|Check that network policies attached to namespaces running workload pods contain a default deny-all rule for both ingress and egress traffic| -|Suggested Remediation|Ensure that a NetworkPolicy with a default deny-all is applied. 
After the default is applied, apply a network policy to allow the traffic your application requires.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-vrfs-aka-routing-instances|
-|Exception Process|No exception needed for optional/extended tests.|
-|Impact Statement|Without default deny-all network policies, workloads are exposed to lateral movement attacks and unauthorized network access, compromising security posture and potentially enabling data breaches.|
-|Tags|common,networking|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | networking-network-policy-deny-all |
+| Description             | Check that network policies attached to namespaces running workload pods contain a default deny-all rule for both ingress and egress traffic |
+| Suggested Remediation   | Ensure that a NetworkPolicy with a default deny-all is applied. After the default is applied, apply a network policy to allow the traffic your application requires. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-vrfs-aka-routing-instances |
+| Exception Process       | No exception needed for optional/extended tests. |
+| Impact Statement        | Without default deny-all network policies, workloads are exposed to lateral movement attacks and unauthorized network access, compromising security posture and potentially enabling data breaches. |
+| Tags                    | common,networking |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |
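
+As an illustrative sketch of the remediation (the namespace and policy names are placeholders): an empty podSelector matches every Pod in the namespace, and listing both policyTypes with no ingress/egress rules yields the default deny-all this check looks for.
+
+```python
+import json
+
+# Hypothetical default deny-all NetworkPolicy for a workload namespace.
+deny_all = {
+    "apiVersion": "networking.k8s.io/v1",
+    "kind": "NetworkPolicy",
+    "metadata": {"name": "default-deny-all", "namespace": "demo-ns"},
+    "spec": {
+        "podSelector": {},
+        "policyTypes": ["Ingress", "Egress"],
+    },
+}
+print(json.dumps(deny_all, indent=2))
+```
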
#### networking-ocp-reserved-ports-usage

-|Property|Description|
-|---|---|
-|Unique ID|networking-ocp-reserved-ports-usage|
-|Description|Check that containers do not listen on ports that are reserved by OpenShift|
-|Suggested Remediation|Ensure that workload's apps do not listen on ports that are reserved by OpenShift. The following ports are reserved by OpenShift and must NOT be used by any application: 22623, 22624.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ports-reserved-by-openshift|
-|Exception Process|No exceptions|
-|Impact Statement|Using OpenShift-reserved ports can cause critical platform services to fail, potentially destabilizing the entire cluster.|
-|Tags|common,networking|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | networking-ocp-reserved-ports-usage |
+| Description             | Check that containers do not listen on ports that are reserved by OpenShift |
+| Suggested Remediation   | Ensure that the workload's applications do not listen on ports that are reserved by OpenShift. The following ports are reserved by OpenShift and must NOT be used by any application: 22623, 22624. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ports-reserved-by-openshift |
+| Exception Process       | No exceptions |
+| Impact Statement        | Using OpenShift-reserved ports can cause critical platform services to fail, potentially destabilizing the entire cluster. |
+| Tags                    | common,networking |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |

#### networking-reserved-partner-ports

-|Property|Description|
-|---|---|
-|Unique ID|networking-reserved-partner-ports|
-|Description|Checks that pods and containers are not consuming ports designated as reserved by partner|
-|Suggested Remediation|Ensure ports are not being used that are reserved by our partner|
-|Best Practice Reference|No Doc Link - Extended|
-|Exception Process|No exception needed for optional/extended tests.|
-|Impact Statement|Using reserved ports can cause port conflicts with essential platform services, leading to service startup failures and unpredictable application behavior.|
-|Tags|extended,networking|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | networking-reserved-partner-ports |
+| Description             | Checks that pods and containers are not consuming ports designated as reserved by the partner |
+| Suggested Remediation   | Ensure that ports reserved by our partner are not being used |
+| Best Practice Reference | No Doc Link - Extended |
+| Exception Process       | No exception needed for optional/extended tests. |
+| Impact Statement        | Using reserved ports can cause port conflicts with essential platform services, leading to service startup failures and unpredictable application behavior. 
| +| Tags | extended,networking | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Optional | +| Non-Telco | Optional | +| Telco | Optional | #### networking-restart-on-reboot-sriov-pod -|Property|Description| -|---|---| -|Unique ID|networking-restart-on-reboot-sriov-pod| -|Description|Ensures that the label restart-on-reboot exists on pods that use SRIOV network interfaces.| -|Suggested Remediation|Ensure that the label restart-on-reboot exists on pods that use SRIOV network interfaces.| -|Best Practice Reference|No Doc Link - Far Edge| -|Exception Process|There is no documented exception process for this.| -|Impact Statement|Without restart-on-reboot labels, SRIOV-enabled pods may fail to recover from a race condition between kubernetes services startup and SR-IOV device plugin configuration on StarlingX AIO systems, causing SR-IOV devices to disappear from running pods when FPGA devices are reset.| -|Tags|faredge,networking| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Optional| -|Telco|Optional| +| Property | Description | +| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | networking-restart-on-reboot-sriov-pod | +| Description | Ensures that the label restart-on-reboot exists on pods that use SRIOV network interfaces. | +| Suggested Remediation | Ensure that the label restart-on-reboot exists on pods that use SRIOV network interfaces. | +| Best Practice Reference | No Doc Link - Far Edge | +| Exception Process | There is no documented exception process for this. | +| Impact Statement | Without restart-on-reboot labels, SRIOV-enabled pods may fail to recover from a race condition between kubernetes services startup and SR-IOV device plugin configuration on StarlingX AIO systems, causing SR-IOV devices to disappear from running pods when FPGA devices are reset. | +| Tags | faredge,networking | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Optional | +| Telco | Optional | #### networking-undeclared-container-ports-usage -|Property|Description| -|---|---| -|Unique ID|networking-undeclared-container-ports-usage| -|Description|Check that containers do not listen on ports that weren't declared in their specification. 
Platforms may be configured to block undeclared ports.|
-|Suggested Remediation|Ensure the workload's apps do not listen on undeclared containers' ports.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs|
-|Exception Process|No exception needed for optional/extended tests.|
-|Impact Statement|Undeclared ports can be blocked by security policies, causing unexpected connectivity issues and making troubleshooting difficult.|
-|Tags|extended,networking|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | networking-undeclared-container-ports-usage |
+| Description             | Check that containers do not listen on ports that weren't declared in their specification. Platforms may be configured to block undeclared ports. |
+| Suggested Remediation   | Ensure that the workload's applications do not listen on undeclared container ports. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs |
+| Exception Process       | No exception needed for optional/extended tests. |
+| Impact Statement        | Undeclared ports can be blocked by security policies, causing unexpected connectivity issues and making troubleshooting difficult. |
+| Tags                    | extended,networking |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |
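
+To avoid tripping this check, every port an application listens on should appear in the container specification. A hypothetical sketch (container name, port, and protocol are placeholders):
+
+```python
+import json
+
+# Container fragment declaring its single listening port.
+container = {
+    "name": "app",
+    "image": "registry.example.com/demo-app:1.0.0",
+    "ports": [{"name": "http", "containerPort": 8080, "protocol": "TCP"}],
+}
+print(json.dumps(container, indent=2))
+```
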
### observability

#### observability-compatibility-with-next-ocp-release

-|Property|Description|
-|---|---|
-|Unique ID|observability-compatibility-with-next-ocp-release|
-|Description|Checks to ensure if the APIs the workload uses are compatible with the next OCP version|
-|Suggested Remediation|Ensure the APIs the workload uses are compatible with the next OCP version|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-k8s-api-versions|
-|Exception Process|No exceptions|
-|Impact Statement|Deprecated API usage can cause applications to break during OpenShift upgrades, requiring emergency fixes.|
-|Tags|common,observability|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | observability-compatibility-with-next-ocp-release |
+| Description             | Checks whether the APIs the workload uses are compatible with the next OCP version |
+| Suggested Remediation   | Ensure the APIs the workload uses are compatible with the next OCP version |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-k8s-api-versions |
+| Exception Process       | No exceptions |
+| Impact Statement        | Deprecated API usage can cause applications to break during OpenShift upgrades, requiring emergency fixes. |
+| Tags                    | common,observability |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### observability-container-logging

-|Property|Description|
-|---|---|
-|Unique ID|observability-container-logging|
-|Description|Check that all containers under test use standard input output and standard error when logging. A container must provide APIs for the platform to observe the container health and act accordingly. These APIs include health checks (liveness and readiness), logging to stderr and stdout for log aggregation (by tools such as Logstash or Filebeat), and integrate with tracing and metrics-gathering libraries (such as Prometheus or Metricbeat).|
-|Suggested Remediation|Ensure containers are not redirecting stdout/stderr|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-logging|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Improper logging configuration prevents log aggregation and monitoring, making troubleshooting and debugging difficult.|
-|Tags|telco,observability|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | observability-container-logging |
+| Description             | Check that all containers under test use standard output and standard error when logging. A container must provide APIs for the platform to observe the container health and act accordingly. These APIs include health checks (liveness and readiness), logging to stderr and stdout for log aggregation (by tools such as Logstash or Filebeat), and integration with tracing and metrics-gathering libraries (such as Prometheus or Metricbeat). |
+| Suggested Remediation   | Ensure containers are not redirecting stdout/stderr |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-logging |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Improper logging configuration prevents log aggregation and monitoring, making troubleshooting and debugging difficult. |
+| Tags                    | telco,observability |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Mandatory |
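
+A minimal application-side sketch (the handler and format are illustrative choices, not catalog requirements) of logging to stdout so the platform's log collector can aggregate it:
+
+```python
+import logging
+import sys
+
+# Send application logs to stdout instead of a file inside the container.
+logging.basicConfig(stream=sys.stdout, level=logging.INFO,
+                    format="%(asctime)s %(levelname)s %(message)s")
+logging.getLogger(__name__).info("service started")
+```
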
#### observability-crd-status

-|Property|Description|
-|---|---|
-|Unique ID|observability-crd-status|
-|Description|Checks that all CRDs have a status sub-resource specification (Spec.versions[].Schema.OpenAPIV3Schema.Properties[“status”]).|
-|Suggested Remediation|Ensure that all the CRDs have a meaningful status specification (Spec.versions[].Schema.OpenAPIV3Schema.Properties[“status”]).|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements|
-|Exception Process|No exceptions|
-|Impact Statement|Missing status subresources prevent proper monitoring and automation based on custom resource states.|
-|Tags|common,observability|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | observability-crd-status |
+| Description             | Checks that all CRDs have a status sub-resource specification (Spec.versions[].Schema.OpenAPIV3Schema.Properties[“status”]). |
+| Suggested Remediation   | Ensure that all the CRDs have a meaningful status specification (Spec.versions[].Schema.OpenAPIV3Schema.Properties[“status”]). |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements |
+| Exception Process       | No exceptions |
+| Impact Statement        | Missing status subresources prevent proper monitoring and automation based on custom resource states. |
+| Tags                    | common,observability |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |
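
+An illustrative fragment of a CRD version entry that would satisfy this check (group, kind, and version are placeholders): the schema declares a status property and the status subresource is enabled.
+
+```python
+import json
+
+# Hypothetical entry for CustomResourceDefinition spec.versions[].
+crd_version = {
+    "name": "v1",
+    "served": True,
+    "storage": True,
+    "subresources": {"status": {}},
+    "schema": {
+        "openAPIV3Schema": {
+            "type": "object",
+            "properties": {
+                "spec": {"type": "object"},
+                "status": {"type": "object"},
+            },
+        },
+    },
+}
+print(json.dumps(crd_version, indent=2))
+```
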
#### observability-pod-disruption-budget

-|Property|Description|
-|---|---|
-|Unique ID|observability-pod-disruption-budget|
-|Description|Checks to see if pod disruption budgets have allowed values for minAvailable and maxUnavailable|
-|Suggested Remediation|Ensure minAvailable is not zero and maxUnavailable does not equal the number of pods in the replica|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-upgrade-expectations|
-|Exception Process|No exceptions|
-|Impact Statement|Improper disruption budgets can prevent necessary maintenance operations or allow too many pods to be disrupted simultaneously.|
-|Tags|common,observability|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | observability-pod-disruption-budget |
+| Description             | Checks to see if pod disruption budgets have allowed values for minAvailable and maxUnavailable |
+| Suggested Remediation   | Ensure minAvailable is not zero and maxUnavailable does not equal the total number of replicas |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-upgrade-expectations |
+| Exception Process       | No exceptions |
+| Impact Statement        | Improper disruption budgets can prevent necessary maintenance operations or allow too many pods to be disrupted simultaneously. |
+| Tags                    | common,observability |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |
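
+A hedged sketch of a PodDisruptionBudget consistent with the remediation above (names and the selector are placeholders): minAvailable is non-zero, and nothing sets maxUnavailable to the full replica count.
+
+```python
+import json
+
+# Hypothetical PodDisruptionBudget for a multi-replica workload.
+pdb = {
+    "apiVersion": "policy/v1",
+    "kind": "PodDisruptionBudget",
+    "metadata": {"name": "demo-app-pdb", "namespace": "demo-ns"},
+    "spec": {
+        "minAvailable": 1,
+        "selector": {"matchLabels": {"app": "demo-app"}},
+    },
+}
+print(json.dumps(pdb, indent=2))
+```
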
#### observability-termination-policy

-|Property|Description|
-|---|---|
-|Unique ID|observability-termination-policy|
-|Description|Check that all containers are using terminationMessagePolicy: FallbackToLogsOnError. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. 
For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem.|
-|Suggested Remediation|Ensure containers are all using FallbackToLogsOnError in terminationMessagePolicy|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-exit-status|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Incorrect termination message policies can prevent proper error reporting and make failure diagnosis difficult.|
-|Tags|telco,observability|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | observability-termination-policy |
+| Description             | Check that all containers are using terminationMessagePolicy: FallbackToLogsOnError. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem. |
+| Suggested Remediation   | Ensure containers are all using FallbackToLogsOnError in terminationMessagePolicy |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-exit-status |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Incorrect termination message policies can prevent proper error reporting and make failure diagnosis difficult. |
+| Tags                    | telco,observability |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Mandatory |
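
+An illustrative container fragment (image and name are placeholders) showing the terminationMessagePolicy this check expects; with FallbackToLogsOnError, the last log lines are used as the termination message when a container exits in error without writing one.
+
+```python
+import json
+
+# Container fragment using FallbackToLogsOnError.
+container = {
+    "name": "app",
+    "image": "registry.example.com/demo-app:1.0.0",
+    "terminationMessagePolicy": "FallbackToLogsOnError",
+}
+print(json.dumps(container, indent=2))
+```
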
### operator

#### operator-catalogsource-bundle-count

-|Property|Description|
-|---|---|
-|Unique ID|operator-catalogsource-bundle-count|
-|Description|Tests operator catalog source bundle count is less than 1000|
-|Suggested Remediation|Ensure that the Operator's catalog source has a valid bundle count less than 1000.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements|
-|Exception Process|No exceptions|
-|Impact Statement|Large catalog sources can cause performance issues, slow operator resolution, and increase cluster resource usage.|
-|Tags|common,operator|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | operator-catalogsource-bundle-count |
+| Description             | Tests that the operator catalog source's bundle count is less than 1000 |
+| Suggested Remediation   | Ensure that the Operator's catalog source has a bundle count of less than 1000. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements |
+| Exception Process       | No exceptions |
+| Impact Statement        | Large catalog sources can cause performance issues, slow operator resolution, and increase cluster resource usage. |
+| Tags                    | common,operator |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |

#### operator-crd-openapi-schema

-|Property|Description|
-|---|---|
-|Unique ID|operator-crd-openapi-schema|
-|Description|Tests whether an application Operator CRD is defined with OpenAPI spec.|
-|Suggested Remediation|Ensure that the Operator CRD is defined with OpenAPI spec.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements|
-|Exception Process|No exceptions|
-|Impact Statement|Missing OpenAPI schemas prevent proper validation and can lead to configuration errors and runtime failures.|
-|Tags|common,operator|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | operator-crd-openapi-schema |
+| Description             | Tests whether an application Operator CRD is defined with an OpenAPI spec. |
+| Suggested Remediation   | Ensure that the Operator CRD is defined with an OpenAPI spec. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements |
+| Exception Process       | No exceptions |
+| Impact Statement        | Missing OpenAPI schemas prevent proper validation and can lead to configuration errors and runtime failures. |

#### operator-crd-versioning

-|Property|Description|
-|---|---|
-|Unique ID|operator-crd-versioning|
-|Description|Tests whether the Operator CRD has a valid versioning.|
-|Suggested Remediation|Ensure that the Operator CRD has a valid version.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements|
-|Exception Process|No exceptions|
-|Impact Statement|Invalid CRD versioning can cause API compatibility issues and prevent proper schema evolution.|
-|Tags|common,operator|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | operator-crd-versioning |
+| Description             | Tests whether the Operator CRD has valid versioning. |
+| Suggested Remediation   | Ensure that the Operator CRD has a valid version. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements |
+| Exception Process       | No exceptions |
+| Impact Statement        | Invalid CRD versioning can cause API compatibility issues and prevent proper schema evolution. |
+| Tags                    | common,operator |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |

#### operator-install-source

-|Property|Description|
-|---|---|
-|Unique ID|operator-install-source|
-|Description|Tests whether a workload Operator is installed via OLM.|
-|Suggested Remediation|Ensure that your Operator is installed via OLM.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements|
-|Exception Process|No exceptions|
-|Impact Statement|Non-OLM operators bypass lifecycle management and dependency resolution, creating operational complexity and update issues.|
-|Tags|common,operator|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | operator-install-source |
+| Description             | Tests whether a workload Operator is installed via OLM. |
+| Suggested Remediation   | Ensure that your Operator is installed via OLM. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements |
+| Exception Process       | No exceptions |
+| Impact Statement        | Non-OLM operators bypass lifecycle management and dependency resolution, creating operational complexity and update issues. |
+| Tags                    | common,operator |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |

#### operator-install-status-no-privileges

-|Property|Description|
-|---|---|
-|Unique ID|operator-install-status-no-privileges|
-|Description|Checks whether the operator needs access to Security Context Constraints. 
Test passes if clusterPermissions is not present in the CSV manifest or is present with no RBAC rules related to SCCs.| -|Suggested Remediation|Ensure all the workload's operators have no privileges on cluster resources.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements| -|Exception Process|No exceptions| -|Impact Statement|Operators with SCC access have elevated privileges that can compromise cluster security and violate security policies.| -|Tags|common,operator| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Unique ID | operator-install-status-no-privileges | +| Description | Checks whether the operator needs access to Security Context Constraints. Test passes if clusterPermissions is not present in the CSV manifest or is present with no RBAC rules related to SCCs. | +| Suggested Remediation | Ensure all the workload's operators have no privileges on cluster resources. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements | +| Exception Process | No exceptions | +| Impact Statement | Operators with SCC access have elevated privileges that can compromise cluster security and violate security policies. | +| Tags | common,operator | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### operator-install-status-succeeded -|Property|Description| -|---|---| -|Unique ID|operator-install-status-succeeded| -|Description|Ensures that the target workload operators report "Succeeded" as their installation status.| -|Suggested Remediation|Ensure all the workload's operators have been successfully installed by OLM.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements| -|Exception Process|No exceptions| -|Impact Statement|Failed operator installations can leave applications in incomplete states, causing functionality gaps and operational issues.| -|Tags|common,operator| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | operator-install-status-succeeded | +| Description | Ensures that the target workload operators report "Succeeded" as their installation status. | +| Suggested Remediation | Ensure all the workload's operators have been successfully installed by OLM. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements | +| Exception Process | No exceptions | +| Impact Statement | Failed operator installations can leave applications in incomplete states, causing functionality gaps and operational issues. 
|
+| Tags                    | common,operator |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |

#### operator-multiple-same-operators

-|Property|Description|
-|---|---|
-|Unique ID|operator-multiple-same-operators|
-|Description|Tests whether multiple instances of the same Operator CSV are installed.|
-|Suggested Remediation|Ensure that only one Operator of the same type is installed in the cluster.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements|
-|Exception Process|No exceptions|
-|Impact Statement|Multiple operator instances can cause conflicts, resource contention, and unpredictable behavior.|
-|Tags|common,operator|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | operator-multiple-same-operators |
+| Description             | Tests whether multiple instances of the same Operator CSV are installed. |
+| Suggested Remediation   | Ensure that only one Operator of the same type is installed in the cluster. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements |
+| Exception Process       | No exceptions |
+| Impact Statement        | Multiple operator instances can cause conflicts, resource contention, and unpredictable behavior. |
+| Tags                    | common,operator |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |

#### operator-olm-skip-range

-|Property|Description|
-|---|---|
-|Unique ID|operator-olm-skip-range|
-|Description|Test that checks the operator has a valid olm skip range.|
-|Suggested Remediation|Ensure that the Operator has a valid OLM skip range. If the operator does not have another version to "skip", then ignore the result of this test.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements|
-|Exception Process|If there is not a version of the operator that needs to be skipped, then an exception will be granted.|
-|Impact Statement|Invalid skip ranges can prevent proper operator upgrades and cause version compatibility issues.|
-|Tags|common,operator|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | operator-olm-skip-range |
+| Description             | Checks that the operator has a valid OLM skip range. |
+| Suggested Remediation   | Ensure that the Operator has a valid OLM skip range. If the operator does not have another version to "skip", then ignore the result of this test. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements |
+| Exception Process       | If there is not a version of the operator that needs to be skipped, then an exception will be granted. |
+| Impact Statement        | Invalid skip ranges can prevent proper operator upgrades and cause version compatibility issues. |
+| Tags                    | common,operator |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |
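+
+For reference, the skip range is declared as the olm.skipRange annotation on the CSV metadata; a minimal sketch using the generic Kubernetes object metadata types (the CSV name and version range are illustrative):
+
+```go
+package olmexample
+
+import (
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// csvObjectMeta sketches the metadata a ClusterServiceVersion would carry:
+// OLM reads the olm.skipRange annotation (a semver range) to decide which
+// older versions an upgrade may skip over.
+func csvObjectMeta() metav1.ObjectMeta {
+    return metav1.ObjectMeta{
+        Name: "example-operator.v1.2.0", // illustrative CSV name
+        Annotations: map[string]string{
+            "olm.skipRange": ">=1.0.0 <1.2.0", // illustrative range
+        },
+    }
+}
+```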

#### operator-pods-no-hugepages

-|Property|Description|
-|---|---|
-|Unique ID|operator-pods-no-hugepages|
-|Description|Tests that the pods do not have hugepages enabled.|
-|Suggested Remediation|Ensure that the pods are not using hugepages|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements|
-|Exception Process|No exceptions|
-|Impact Statement|Hugepage usage by operators can interfere with application hugepage allocation and cause resource contention.|
-|Tags|common,operator|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | operator-pods-no-hugepages |
+| Description             | Tests that the pods do not have hugepages enabled. |
+| Suggested Remediation   | Ensure that the pods are not using hugepages |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements |
+| Exception Process       | No exceptions |
+| Impact Statement        | Hugepage usage by operators can interfere with application hugepage allocation and cause resource contention. |
+| Tags                    | common,operator |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### operator-semantic-versioning

-|Property|Description|
-|---|---|
-|Unique ID|operator-semantic-versioning|
-|Description|Tests whether an application Operator has a valid semantic versioning.|
-|Suggested Remediation|Ensure that the Operator has a valid semantic versioning.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements|
-|Exception Process|No exceptions|
-|Impact Statement|Invalid semantic versioning prevents proper upgrade paths and dependency management, causing operational issues.|
-|Tags|common,operator|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | operator-semantic-versioning |
+| Description             | Tests whether an application Operator uses valid semantic versioning. |
+| Suggested Remediation   | Ensure that the Operator has a valid semantic version. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements |
+| Exception Process       | No exceptions |
+| Impact Statement        | Invalid semantic versioning prevents proper upgrade paths and dependency management, causing operational issues. 
| +| Tags | common,operator | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### operator-single-crd-owner -|Property|Description| -|---|---| -|Unique ID|operator-single-crd-owner| -|Description|Tests whether a CRD is owned by a single Operator.| -|Suggested Remediation|Ensure that a CRD is owned by only one Operator| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements| -|Exception Process|No exceptions| -|Impact Statement|Multiple CRD owners can cause conflicts, inconsistent behavior, and management complexity.| -|Tags|common,operator| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | --------------------------------------------------------------------------------------------------- | +| Unique ID | operator-single-crd-owner | +| Description | Tests whether a CRD is owned by a single Operator. | +| Suggested Remediation | Ensure that a CRD is owned by only one Operator | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements | +| Exception Process | No exceptions | +| Impact Statement | Multiple CRD owners can cause conflicts, inconsistent behavior, and management complexity. | +| Tags | common,operator | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### operator-single-or-multi-namespaced-allowed-in-tenant-namespaces -|Property|Description| -|---|---| -|Unique ID|operator-single-or-multi-namespaced-allowed-in-tenant-namespaces| -|Description|Verifies that only single/multi namespaced operators are installed in a tenant-dedicated namespace. The test fails if this namespace contains any installed operator with Own/All-namespaced install mode, unlabeled operators, operands of any operator installed elsewhere, or pods unrelated to any operator.| -|Suggested Remediation|Ensure that operator with install mode SingleNamespaced or MultiNamespaced only is installed in the tenant namespace. Any installed operator with different install mode (AllNamespaced or OwnNamespaced) or pods not belonging to any operator must not be present in this namespace.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements| -|Exception Process|No exceptions| -|Impact Statement|Improperly scoped operators can violate tenant isolation and create unauthorized cross-namespace access.| -|Tags|extended,operator| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Optional| -|Non-Telco|Optional| -|Telco|Optional| +| Property | Description | +| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | operator-single-or-multi-namespaced-allowed-in-tenant-namespaces | +| Description | Verifies that only single/multi namespaced operators are installed in a tenant-dedicated namespace. 
The test fails if this namespace contains any installed operator with Own/All-namespaced install mode, unlabeled operators, operands of any operator installed elsewhere, or pods unrelated to any operator. |
+| Suggested Remediation   | Ensure that only operators with the SingleNamespaced or MultiNamespaced install mode are installed in the tenant namespace. Any installed operator with a different install mode (AllNamespaced or OwnNamespaced), and any pods not belonging to an operator, must not be present in this namespace. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements |
+| Exception Process       | No exceptions |
+| Impact Statement        | Improperly scoped operators can violate tenant isolation and create unauthorized cross-namespace access. |
+| Tags                    | extended,operator |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

### performance

#### performance-exclusive-cpu-pool

-|Property|Description|
-|---|---|
-|Unique ID|performance-exclusive-cpu-pool|
-|Description|Ensures that if one container in a Pod selects an exclusive CPU pool the rest select the same type of CPU pool|
-|Suggested Remediation|Ensure that if one container in a Pod selects an exclusive CPU pool the rest also select this type of CPU pool|
-|Best Practice Reference|No Doc Link - Far Edge|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Inconsistent CPU pool selection can cause performance interference and unpredictable latency in real-time applications.|
-|Tags|faredge,performance|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | performance-exclusive-cpu-pool |
+| Description             | Ensures that if one container in a Pod selects an exclusive CPU pool, the rest select the same type of CPU pool |
+| Suggested Remediation   | Ensure that if one container in a Pod selects an exclusive CPU pool, the rest also select this type of CPU pool |
+| Best Practice Reference | No Doc Link - Far Edge |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Inconsistent CPU pool selection can cause performance interference and unpredictable latency in real-time applications. |
+| Tags                    | faredge,performance |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Optional |
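+
+A minimal sketch of the consistent case this test expects: every container in the Pod uses integer CPU counts with requests equal to limits (Guaranteed QoS), so all of them land in the exclusive pool (names and sizes are illustrative):
+
+```go
+package cpupool
+
+import (
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
+)
+
+// guaranteed returns resource requirements that make a container eligible
+// for exclusive CPUs: an integer CPU count with requests equal to limits.
+func guaranteed(cpus, memory string) corev1.ResourceRequirements {
+    rl := corev1.ResourceList{
+        corev1.ResourceCPU:    resource.MustParse(cpus),
+        corev1.ResourceMemory: resource.MustParse(memory),
+    }
+    return corev1.ResourceRequirements{Requests: rl, Limits: rl}
+}
+
+// examplePodSpec gives both containers the same type of CPU pool.
+func examplePodSpec() corev1.PodSpec {
+    return corev1.PodSpec{
+        Containers: []corev1.Container{
+            {Name: "dataplane", Resources: guaranteed("2", "1Gi")},  // illustrative
+            {Name: "sidecar", Resources: guaranteed("1", "512Mi")},  // illustrative
+        },
+    }
+}
+```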

#### performance-exclusive-cpu-pool-rt-scheduling-policy

-|Property|Description|
-|---|---|
-|Unique ID|performance-exclusive-cpu-pool-rt-scheduling-policy|
-|Description|Ensures that if application workload runs in exclusive CPU pool, it chooses RT CPU schedule policy and set the priority less than 10.|
-|Suggested Remediation|Ensure that the workload running in Application exclusive CPU pool can choose RT CPU scheduling policy, but should set priority less than 10|
-|Best Practice Reference|No Doc Link - Far Edge|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Wrong scheduling policies in exclusive CPU pools can prevent real-time applications from meeting latency requirements.|
-|Tags|faredge,performance|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | performance-exclusive-cpu-pool-rt-scheduling-policy |
+| Description             | Ensures that if an application workload runs in an exclusive CPU pool, it chooses an RT CPU scheduling policy and sets the priority to less than 10. |
+| Suggested Remediation   | A workload running in the application exclusive CPU pool may choose an RT CPU scheduling policy, but it should set the priority to less than 10 |
+| Best Practice Reference | No Doc Link - Far Edge |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Wrong scheduling policies in exclusive CPU pools can prevent real-time applications from meeting latency requirements. |
+| Tags                    | faredge,performance |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### performance-isolated-cpu-pool-rt-scheduling-policy

-|Property|Description|
-|---|---|
-|Unique ID|performance-isolated-cpu-pool-rt-scheduling-policy|
-|Description|Ensures that a workload running in an application-isolated exclusive CPU pool selects a RT CPU scheduling policy|
-|Suggested Remediation|Ensure that the workload running in an application-isolated exclusive CPU pool selects a RT CPU scheduling policy (such as SCHED_FIFO/SCHED_RR) with High priority.|
-|Best Practice Reference|No Doc Link - Far Edge|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Incorrect scheduling policies in isolated CPU pools can cause performance degradation and violate real-time guarantees.|
-|Tags|faredge,performance|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | performance-isolated-cpu-pool-rt-scheduling-policy |
+| Description             | Ensures that a workload running in an application-isolated exclusive CPU pool selects an RT CPU scheduling policy |
+| Suggested Remediation   | Ensure that the workload running in an application-isolated exclusive CPU pool selects an RT CPU scheduling policy (such as SCHED_FIFO/SCHED_RR) with high priority. |
+| Best Practice Reference | No Doc Link - Far Edge |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Incorrect scheduling policies in isolated CPU pools can cause performance degradation and violate real-time guarantees. |
+| Tags                    | faredge,performance |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### performance-max-resources-exec-probes

-|Property|Description|
-|---|---|
-|Unique ID|performance-max-resources-exec-probes|
-|Description|Checks that less than 10 exec probes are configured in the cluster for this workload. Also checks that the periodSeconds parameter for each probe is superior or equal to 10.|
-|Suggested Remediation|Reduce the number of exec probes in the cluster for this workload to less than 10. Increase the update period of the exec probe to be superior or equal to 10 seconds.|
-|Best Practice Reference|No Doc Link - Far Edge|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Excessive exec probes can overwhelm system resources, degrade performance, and interfere with critical application operations in resource-constrained environments.|
-|Tags|faredge,performance|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | performance-max-resources-exec-probes |
+| Description             | Checks that less than 10 exec probes are configured in the cluster for this workload. 
Also checks that the periodSeconds parameter for each probe is greater than or equal to 10. |
+| Suggested Remediation   | Reduce the number of exec probes in the cluster for this workload to less than 10. Increase the update period of each exec probe to be greater than or equal to 10 seconds. |
+| Best Practice Reference | No Doc Link - Far Edge |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Excessive exec probes can overwhelm system resources, degrade performance, and interfere with critical application operations in resource-constrained environments. |
+| Tags                    | faredge,performance |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |
+
+#### performance-cpu-pinning-no-exec-probes
+
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | performance-cpu-pinning-no-exec-probes |
+| Description             | Workloads utilizing CPU pinning (Guaranteed QoS with exclusive CPUs) should not use exec probes. Exec probes run a command within the container, which could interfere with latency-sensitive workloads and cause performance degradation. |
+| Suggested Remediation   | Workloads that use CPU pinning (Guaranteed QoS with exclusive CPUs) should not use exec probes. Use httpGet or tcpSocket probes instead, as exec probes can interfere with latency-sensitive workloads requiring non-interruptible task execution. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cpu-manager-pinning |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Exec probes on workloads with CPU pinning (exclusive CPUs) can cause performance degradation, interrupt latency-sensitive operations, and potentially crash applications due to resource contention. Any workload requiring exclusive CPUs inherently needs non-interruptible task execution. |
+| Tags                    | telco,performance |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Mandatory |
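+
+A minimal sketch of the recommended shape: a pinned (Guaranteed QoS, integer-CPU) container probed over HTTP instead of with an exec command (the port, path, and names are illustrative):
+
+```go
+package probes
+
+import (
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
+    "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+func pinnedContainer() corev1.Container {
+    rl := corev1.ResourceList{
+        corev1.ResourceCPU:    resource.MustParse("4"), // integer CPUs => exclusive pinning
+        corev1.ResourceMemory: resource.MustParse("2Gi"),
+    }
+    return corev1.Container{
+        Name:      "latency-sensitive", // illustrative
+        Resources: corev1.ResourceRequirements{Requests: rl, Limits: rl},
+        // An httpGet probe is answered by the application itself and does not
+        // spawn a process inside the container, unlike an exec probe.
+        LivenessProbe: &corev1.Probe{
+            ProbeHandler: corev1.ProbeHandler{
+                HTTPGet: &corev1.HTTPGetAction{
+                    Path: "/healthz",            // illustrative
+                    Port: intstr.FromInt(8080),  // illustrative
+                },
+            },
+            PeriodSeconds: 10,
+        },
+    }
+}
+```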

#### performance-rt-apps-no-exec-probes

-|Property|Description|
-|---|---|
-|Unique ID|performance-rt-apps-no-exec-probes|
-|Description|Ensures that if one container runs a real time application exec probes are not used|
-|Suggested Remediation|Ensure that if one container runs a real time application exec probes are not used|
-|Best Practice Reference|No Doc Link - Far Edge|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Exec probes on real-time applications can cause latency spikes and interrupt time-critical operations.|
-|Tags|faredge,performance|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | performance-rt-apps-no-exec-probes |
+| Description             | Ensures that if one container runs a real-time application, exec probes are not used |
+| Suggested Remediation   | Ensure that if one container runs a real-time application, exec probes are not used |
+| Best Practice Reference | No Doc Link - Far Edge |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Exec probes on real-time applications can cause latency spikes and interrupt time-critical operations. |
+| Tags                    | faredge,performance |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### performance-shared-cpu-pool-non-rt-scheduling-policy

-|Property|Description|
-|---|---|
-|Unique ID|performance-shared-cpu-pool-non-rt-scheduling-policy|
-|Description|Ensures that if application workload runs in shared CPU pool, it chooses non-RT CPU schedule policy to always share the CPU with other applications and kernel threads.|
-|Suggested Remediation|Ensure that the workload running in Application shared CPU pool should choose non-RT CPU schedule policy, like SCHED _OTHER to always share the CPU with other applications and kernel threads.|
-|Best Practice Reference|No Doc Link - Far Edge|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Incorrect scheduling policies in shared CPU pools can cause performance interference and unfair resource distribution.|
-|Tags|faredge,performance|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | performance-shared-cpu-pool-non-rt-scheduling-policy |
+| Description             | Ensures that if an application workload runs in the shared CPU pool, it chooses a non-RT CPU scheduling policy so that it always shares the CPU with other applications and kernel threads. |
+| Suggested Remediation   | Ensure that a workload running in the application shared CPU pool chooses a non-RT CPU scheduling policy, such as SCHED_OTHER, to always share the CPU with other applications and kernel threads. 
| +| Best Practice Reference | No Doc Link - Far Edge | +| Exception Process | There is no documented exception process for this. | +| Impact Statement | Incorrect scheduling policies in shared CPU pools can cause performance interference and unfair resource distribution. | +| Tags | faredge,performance | +| **Scenario** | **Optional/Mandatory** | +| Extended | Optional | +| Far-Edge | Mandatory | +| Non-Telco | Optional | +| Telco | Optional | ### platform-alteration #### platform-alteration-base-image -|Property|Description| -|---|---| -|Unique ID|platform-alteration-base-image| -|Description|Ensures that the Container Base Image is not altered post-startup. This test is a heuristic, and ensures that there are no changes to the following directories: 1) /var/lib/rpm 2) /var/lib/dpkg 3) /bin 4) /sbin 5) /lib 6) /lib64 7) /usr/bin 8) /usr/sbin 9) /usr/lib 10) /usr/lib64| -|Suggested Remediation|Ensure that Container applications do not modify the Container Base Image. In particular, ensure that the following directories are not modified: 1) /var/lib/rpm 2) /var/lib/dpkg 3) /bin 4) /sbin 5) /lib 6) /lib64 7) /usr/bin 8) /usr/sbin 9) /usr/lib 10) /usr/lib64 Ensure that all required binaries are built directly into the container image, and are not installed post startup.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-image-standards| -|Exception Process|No exceptions| -|Impact Statement|Modified base images can introduce security vulnerabilities, create inconsistent behavior, and violate immutable infrastructure principles.| -|Tags|common,platform-alteration| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | platform-alteration-base-image | +| Description | Ensures that the Container Base Image is not altered post-startup. This test is a heuristic, and ensures that there are no changes to the following directories: 1) /var/lib/rpm 2) /var/lib/dpkg 3) /bin 4) /sbin 5) /lib 6) /lib64 7) /usr/bin 8) /usr/sbin 9) /usr/lib 10) /usr/lib64 | +| Suggested Remediation | Ensure that Container applications do not modify the Container Base Image. In particular, ensure that the following directories are not modified: 1) /var/lib/rpm 2) /var/lib/dpkg 3) /bin 4) /sbin 5) /lib 6) /lib64 7) /usr/bin 8) /usr/sbin 9) /usr/lib 10) /usr/lib64 Ensure that all required binaries are built directly into the container image, and are not installed post startup. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-image-standards | +| Exception Process | No exceptions | +| Impact Statement | Modified base images can introduce security vulnerabilities, create inconsistent behavior, and violate immutable infrastructure principles. 
| +| Tags | common,platform-alteration | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### platform-alteration-boot-params -|Property|Description| -|---|---| -|Unique ID|platform-alteration-boot-params| -|Description|Tests that boot parameters are set through the MachineConfigOperator, and not set manually on the Node.| -|Suggested Remediation|Ensure that boot parameters are set directly through the MachineConfigOperator, or indirectly through the PerformanceAddonOperator. Boot parameters should not be changed directly through the Node, as OpenShift should manage the changes for you.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-host-os| -|Exception Process|No exceptions| -|Impact Statement|Manual boot parameter changes bypass cluster configuration management and can cause node instability and configuration drift.| -|Tags|common,platform-alteration| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | platform-alteration-boot-params | +| Description | Tests that boot parameters are set through the MachineConfigOperator, and not set manually on the Node. | +| Suggested Remediation | Ensure that boot parameters are set directly through the MachineConfigOperator, or indirectly through the PerformanceAddonOperator. Boot parameters should not be changed directly through the Node, as OpenShift should manage the changes for you. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-host-os | +| Exception Process | No exceptions | +| Impact Statement | Manual boot parameter changes bypass cluster configuration management and can cause node instability and configuration drift. | +| Tags | common,platform-alteration | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### platform-alteration-cluster-operator-health -|Property|Description| -|---|---| -|Unique ID|platform-alteration-cluster-operator-health| -|Description|Tests that all cluster operators are healthy.| -|Suggested Remediation|Ensure each cluster operator is in an 'Available' state. 
If an operator is not in an 'Available' state, investigate the operator's logs and events to determine the cause of the failure.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements|
-|Exception Process|No exceptions|
-|Impact Statement|Unhealthy cluster operators can cause platform instability, feature failures, and degraded cluster functionality.|
-|Tags|common,platform-alteration|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | platform-alteration-cluster-operator-health |
+| Description             | Tests that all cluster operators are healthy. |
+| Suggested Remediation   | Ensure each cluster operator is in an 'Available' state. If an operator is not in an 'Available' state, investigate the operator's logs and events to determine the cause of the failure. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements |
+| Exception Process       | No exceptions |
+| Impact Statement        | Unhealthy cluster operators can cause platform instability, feature failures, and degraded cluster functionality. |
+| Tags                    | common,platform-alteration |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### platform-alteration-hugepages-1g-only

-|Property|Description|
-|---|---|
-|Unique ID|platform-alteration-hugepages-1g-only|
-|Description|Check that pods using hugepages only use 1Gi size|
-|Suggested Remediation|Modify pod to consume 1Gi hugepages only|
-|Best Practice Reference|No Doc Link - Far Edge|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Incorrect hugepage configuration can lead to memory fragmentation and application startup failures in memory-constrained environments.|
-|Tags|faredge,platform-alteration|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Mandatory|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | platform-alteration-hugepages-1g-only |
+| Description             | Check that pods using hugepages only use the 1Gi size |
+| Suggested Remediation   | Modify pod to consume 1Gi hugepages only |
+| Best Practice Reference | No Doc Link - Far Edge |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Incorrect hugepage configuration can lead to memory fragmentation and application startup failures in memory-constrained environments. |
+| Tags                    | faredge,platform-alteration |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Optional |
+| Telco                   | Optional |
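+
+A minimal sketch of a compliant pod spec: only the 1Gi hugepage size is requested, backed by a HugePages-medium emptyDir volume (the names, amounts, and mount path are illustrative):
+
+```go
+package hugepages
+
+import (
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
+)
+
+func hugepagePodSpec() corev1.PodSpec {
+    rl := corev1.ResourceList{
+        corev1.ResourceCPU:    resource.MustParse("1"),
+        corev1.ResourceMemory: resource.MustParse("1Gi"),
+        // Only the 1Gi hugepage size is requested; mixing in a
+        // hugepages-2Mi request here is what this test flags.
+        "hugepages-1Gi": resource.MustParse("2Gi"),
+    }
+    return corev1.PodSpec{
+        Containers: []corev1.Container{{
+            Name:      "hugepage-consumer", // illustrative
+            Resources: corev1.ResourceRequirements{Requests: rl, Limits: rl},
+            VolumeMounts: []corev1.VolumeMount{{
+                Name:      "hugepages",
+                MountPath: "/dev/hugepages", // illustrative mount path
+            }},
+        }},
+        Volumes: []corev1.Volume{{
+            Name: "hugepages",
+            VolumeSource: corev1.VolumeSource{
+                EmptyDir: &corev1.EmptyDirVolumeSource{
+                    Medium: corev1.StorageMediumHugePages,
+                },
+            },
+        }},
+    }
+}
+```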

#### platform-alteration-hugepages-2m-only

-|Property|Description|
-|---|---|
-|Unique ID|platform-alteration-hugepages-2m-only|
-|Description|Check that pods using hugepages only use 2Mi size|
-|Suggested Remediation|Modify pod to consume 2Mi hugepages only|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-huge-pages|
-|Exception Process|No exception needed for optional/extended tests.|
-|Impact Statement|Using inappropriate hugepage sizes can cause memory allocation failures and reduce overall system performance and stability.|
-|Tags|extended,platform-alteration|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | platform-alteration-hugepages-2m-only |
+| Description             | Check that pods using hugepages only use the 2Mi size |
+| Suggested Remediation   | Modify pod to consume 2Mi hugepages only |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-huge-pages |
+| Exception Process       | No exception needed for optional/extended tests. |
+| Impact Statement        | Using inappropriate hugepage sizes can cause memory allocation failures and reduce overall system performance and stability. |
+| Tags                    | extended,platform-alteration |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### platform-alteration-hugepages-config

-|Property|Description|
-|---|---|
-|Unique ID|platform-alteration-hugepages-config|
-|Description|Checks to see that HugePage settings have been configured through MachineConfig, and not manually on the underlying Node. This test case applies only to Nodes that are labeled as workers with the standard label "node-role.kubernetes.io/worker". First, the MachineConfig is inspected for hugepage settings in systemd units. If not, the MC's .spec.kernelArguments are inspected for hugepage settings. The sizes and page numbers are compared, and the test passes only if they are the same than then ones in node's /sys/kernel/mm/hugepages/hugepages-X folders.|
-|Suggested Remediation|HugePage settings for worker nodes must be configured either directly through the MachineConfigOperator or indirectly using the PerformanceAddonOperator. 
Avoid making changes directly to an underlying Node, and let OpenShift handle the heavy lifting of configuring advanced settings.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-huge-pages|
-|Exception Process|No exceptions|
-|Impact Statement|Manual hugepage configuration bypasses cluster management, can cause node instability, and creates configuration drift issues.|
-|Tags|common,platform-alteration|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | platform-alteration-hugepages-config |
+| Description             | Checks to see that HugePage settings have been configured through MachineConfig, and not manually on the underlying Node. This test case applies only to Nodes that are labeled as workers with the standard label "node-role.kubernetes.io/worker". First, the MachineConfig is inspected for hugepage settings in systemd units. If none are found, the MC's .spec.kernelArguments are inspected for hugepage settings. The sizes and page numbers are compared, and the test passes only if they are the same as the ones in the node's /sys/kernel/mm/hugepages/hugepages-X folders. |
+| Suggested Remediation   | HugePage settings for worker nodes must be configured either directly through the MachineConfigOperator or indirectly using the PerformanceAddonOperator. Avoid making changes directly to an underlying Node, and let OpenShift handle the heavy lifting of configuring advanced settings. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-huge-pages |
+| Exception Process       | No exceptions |
+| Impact Statement        | Manual hugepage configuration bypasses cluster management, can cause node instability, and creates configuration drift issues. |
+| Tags                    | common,platform-alteration |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |
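+
+A sketch of the node-side half of the comparison: reading the configured page counts from the /sys/kernel/mm/hugepages folders this test inspects (standard Linux sysfs paths; error handling kept minimal):
+
+```go
+package main
+
+import (
+    "fmt"
+    "os"
+    "path/filepath"
+    "strings"
+)
+
+func main() {
+    const base = "/sys/kernel/mm/hugepages" // one hugepages-<size>kB dir per page size
+    entries, err := os.ReadDir(base)
+    if err != nil {
+        fmt.Fprintln(os.Stderr, err)
+        return
+    }
+    for _, e := range entries {
+        data, err := os.ReadFile(filepath.Join(base, e.Name(), "nr_hugepages"))
+        if err != nil {
+            continue
+        }
+        // e.Name() is e.g. "hugepages-1048576kB"; nr_hugepages is the
+        // configured page count that should match the MachineConfig.
+        fmt.Printf("%s: %s pages\n", e.Name(), strings.TrimSpace(string(data)))
+    }
+}
+```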

#### platform-alteration-hyperthread-enable

-|Property|Description|
-|---|---|
-|Unique ID|platform-alteration-hyperthread-enable|
-|Description|Check that baremetal workers have hyperthreading enabled|
-|Suggested Remediation|Check that baremetal workers have hyperthreading enabled|
-|Best Practice Reference|No Doc Link - Extended|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Disabled hyperthreading reduces CPU performance and can affect workload scheduling and resource utilization efficiency.|
-|Tags|extended,platform-alteration|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | platform-alteration-hyperthread-enable |
+| Description             | Check that baremetal workers have hyperthreading enabled |
+| Suggested Remediation   | Ensure that baremetal workers have hyperthreading enabled |
+| Best Practice Reference | No Doc Link - Extended |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Disabled hyperthreading reduces CPU performance and can affect workload scheduling and resource utilization efficiency. |
+| Tags                    | extended,platform-alteration |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### platform-alteration-is-selinux-enforcing

-|Property|Description|
-|---|---|
-|Unique ID|platform-alteration-is-selinux-enforcing|
-|Description|verifies that all openshift platform/cluster nodes have selinux in "Enforcing" mode.|
-|Suggested Remediation|Configure selinux and enable enforcing mode.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-security|
-|Exception Process|No exceptions|
-|Impact Statement|Non-enforcing SELinux reduces security isolation and can allow privilege escalation attacks and unauthorized resource access.|
-|Tags|common,platform-alteration|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | platform-alteration-is-selinux-enforcing |
+| Description             | Verifies that all OpenShift platform/cluster nodes have SELinux in "Enforcing" mode. |
+| Suggested Remediation   | Configure SELinux and enable enforcing mode. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-security |
+| Exception Process       | No exceptions |
+| Impact Statement        | Non-enforcing SELinux reduces security isolation and can allow privilege escalation attacks and unauthorized resource access. |
+| Tags                    | common,platform-alteration |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |
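+
+A sketch of the per-node check: with selinuxfs mounted at its default location, /sys/fs/selinux/enforce reads "1" when SELinux is enforcing:
+
+```go
+package main
+
+import (
+    "fmt"
+    "os"
+    "strings"
+)
+
+func main() {
+    // "1" means Enforcing, "0" means Permissive; the file is absent when
+    // SELinux is disabled or selinuxfs is not mounted at the default path.
+    data, err := os.ReadFile("/sys/fs/selinux/enforce")
+    if err != nil {
+        fmt.Fprintln(os.Stderr, "SELinux status unavailable:", err)
+        return
+    }
+    if strings.TrimSpace(string(data)) == "1" {
+        fmt.Println("SELinux is enforcing")
+    } else {
+        fmt.Println("SELinux is NOT enforcing")
+    }
+}
+```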

#### platform-alteration-isredhat-release

-|Property|Description|
-|---|---|
-|Unique ID|platform-alteration-isredhat-release|
-|Description|verifies if the container base image is redhat.|
-|Suggested Remediation|Build a new container image that is based on UBI (Red Hat Universal Base Image).|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security|
-|Exception Process|No exceptions|
-|Impact Statement|Non-Red Hat base images may lack security updates, enterprise support, and compliance certifications required for production use.|
-|Tags|common,platform-alteration|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | platform-alteration-isredhat-release |
+| Description             | Verifies that the container base image is Red Hat-based. |
+| Suggested Remediation   | Build a new container image that is based on UBI (Red Hat Universal Base Image). |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security |
+| Exception Process       | No exceptions |
+| Impact Statement        | Non-Red Hat base images may lack security updates, enterprise support, and compliance certifications required for production use. |
+| Tags                    | common,platform-alteration |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |

#### platform-alteration-ocp-lifecycle

-|Property|Description|
-|---|---|
-|Unique ID|platform-alteration-ocp-lifecycle|
-|Description|Tests that the running OCP version is not end of life.|
-|Suggested Remediation|Please update your cluster to a version that is generally available.|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-k8s|
-|Exception Process|No exceptions|
-|Impact Statement|End-of-life OpenShift versions lack security updates and support, creating significant security and operational risks.|
-|Tags|common,platform-alteration|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | platform-alteration-ocp-lifecycle |
+| Description             | Tests that the running OCP version is not end of life. |
+| Suggested Remediation   | Please update your cluster to a version that is generally available. |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-k8s |
+| Exception Process       | No exceptions |
+| Impact Statement        | End-of-life OpenShift versions lack security updates and support, creating significant security and operational risks. 
| +| Tags | common,platform-alteration | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### platform-alteration-ocp-node-os-lifecycle -|Property|Description| -|---|---| -|Unique ID|platform-alteration-ocp-node-os-lifecycle| -|Description|Tests that the nodes running in the cluster have operating systems that are compatible with the deployed version of OpenShift.| -|Suggested Remediation|Please update your workers to a version that is supported by your version of OpenShift| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-host-os| -|Exception Process|No exceptions| -|Impact Statement|Incompatible node operating systems can cause stability issues, security vulnerabilities, and lack of vendor support.| -|Tags|common,platform-alteration| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------ | +| Unique ID | platform-alteration-ocp-node-os-lifecycle | +| Description | Tests that the nodes running in the cluster have operating systems that are compatible with the deployed version of OpenShift. | +| Suggested Remediation | Please update your workers to a version that is supported by your version of OpenShift | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-host-os | +| Exception Process | No exceptions | +| Impact Statement | Incompatible node operating systems can cause stability issues, security vulnerabilities, and lack of vendor support. | +| Tags | common,platform-alteration | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | #### platform-alteration-service-mesh-usage -|Property|Description| -|---|---| -|Unique ID|platform-alteration-service-mesh-usage| -|Description|Checks if the istio namespace ("istio-system") is present. If it is present, checks that the istio sidecar is present in all pods under test.| -|Suggested Remediation|Ensure all the workload pods are using service mesh if the cluster provides it.| -|Best Practice Reference|No Doc Link - Extended| -|Exception Process|No exception needed for optional/extended tests.| -|Impact Statement|Inconsistent service mesh configuration can create security gaps, monitoring blind spots, and traffic management issues.| -|Tags|extended,platform-alteration| -|**Scenario**|**Optional/Mandatory**| -|Extended|Optional| -|Far-Edge|Optional| -|Non-Telco|Optional| -|Telco|Optional| +| Property | Description | +| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | platform-alteration-service-mesh-usage | +| Description | Checks if the istio namespace ("istio-system") is present. If it is present, checks that the istio sidecar is present in all pods under test. | +| Suggested Remediation | Ensure all the workload pods are using service mesh if the cluster provides it. | +| Best Practice Reference | No Doc Link - Extended | +| Exception Process | No exception needed for optional/extended tests. 
|
+| Impact Statement        | Inconsistent service mesh configuration can create security gaps, monitoring blind spots, and traffic management issues. |
+| Tags                    | extended,platform-alteration |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### platform-alteration-sysctl-config

-|Property|Description|
-|---|---|
-|Unique ID|platform-alteration-sysctl-config|
-|Description|Tests that no one has changed the node's sysctl configs after the node was created, the tests works by checking if the sysctl configs are consistent with the MachineConfig CR which defines how the node should be configured|
-|Suggested Remediation|You should recreate the node or change the sysctls, recreating is recommended because there might be other unknown changes|
-|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security|
-|Exception Process|No exceptions|
-|Impact Statement|Manual sysctl modifications can cause system instability, security vulnerabilities, and unpredictable kernel behavior.|
-|Tags|common,platform-alteration|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Mandatory|
-|Far-Edge|Mandatory|
-|Non-Telco|Mandatory|
-|Telco|Mandatory|
+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | platform-alteration-sysctl-config |
+| Description             | Tests that no one has changed the node's sysctl configs after the node was created. The test works by checking that the sysctl configs are consistent with the MachineConfig CR, which defines how the node should be configured |
+| Suggested Remediation   | You should recreate the node or revert the sysctls; recreating is recommended because there might be other unknown changes |
+| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security |
+| Exception Process       | No exceptions |
+| Impact Statement        | Manual sysctl modifications can cause system instability, security vulnerabilities, and unpredictable kernel behavior. |
+| Tags                    | common,platform-alteration |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Mandatory |
+| Far-Edge                | Mandatory |
+| Non-Telco               | Mandatory |
+| Telco                   | Mandatory |

#### platform-alteration-tainted-node-kernel

-|Property|Description|
-|---|---|
-|Unique ID|platform-alteration-tainted-node-kernel|
-|Description|Ensures that the Node(s) hosting workloads do not utilize tainted kernels. This test case is especially important to support Highly Available workloads, since when a workload is re-instantiated on a backup Node, that Node's kernel may not have the same hacks.'|
-|Suggested Remediation|Test failure indicates that the underlying Node's kernel is tainted. 
Ensure that you have not altered underlying Node(s) kernels in order to run the workload.| -|Best Practice Reference|https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations| -|Exception Process|If taint is necessary, document details of the taint and why it's needed by workload or environment.| -|Impact Statement|Tainted kernels indicate unauthorized modifications that can introduce instability, security vulnerabilities, and support issues.| -|Tags|common,platform-alteration| -|**Scenario**|**Optional/Mandatory**| -|Extended|Mandatory| -|Far-Edge|Mandatory| -|Non-Telco|Mandatory| -|Telco|Mandatory| +| Property | Description | +| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | platform-alteration-tainted-node-kernel | +| Description | Ensures that the Node(s) hosting workloads do not utilize tainted kernels. This test case is especially important to support Highly Available workloads, since when a workload is re-instantiated on a backup Node, that Node's kernel may not have the same hacks.' | +| Suggested Remediation | Test failure indicates that the underlying Node's kernel is tainted. Ensure that you have not altered underlying Node(s) kernels in order to run the workload. | +| Best Practice Reference | https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations | +| Exception Process | If taint is necessary, document details of the taint and why it's needed by workload or environment. | +| Impact Statement | Tainted kernels indicate unauthorized modifications that can introduce instability, security vulnerabilities, and support issues. | +| Tags | common,platform-alteration | +| **Scenario** | **Optional/Mandatory** | +| Extended | Mandatory | +| Far-Edge | Mandatory | +| Non-Telco | Mandatory | +| Telco | Mandatory | ### preflight #### preflight-AllImageRefsInRelatedImages -|Property|Description| -|---|---| -|Unique ID|preflight-AllImageRefsInRelatedImages| -|Description|Check that all images in the CSV are listed in RelatedImages section. Currently, this check is not enforced.| -|Suggested Remediation|Either manually or with a tool, populate the RelatedImages section of the CSV| -|Best Practice Reference|No Doc Link| -|Exception Process|There is no documented exception process for this.| -|Impact Statement|Missing or incorrect image references in related images can cause deployment failures and broken operator functionality.| -|Tags|common,preflight| -|**Scenario**|**Optional/Mandatory**| -|Extended|Optional| -|Far-Edge|Optional| -|Non-Telco|Optional| -|Telco|Optional| +| Property | Description | +| ----------------------- | ------------------------------------------------------------------------------------------------------------------------ | +| Unique ID | preflight-AllImageRefsInRelatedImages | +| Description | Check that all images in the CSV are listed in RelatedImages section. Currently, this check is not enforced. | +| Suggested Remediation | Either manually or with a tool, populate the RelatedImages section of the CSV | +| Best Practice Reference | No Doc Link | +| Exception Process | There is no documented exception process for this. 
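+For `preflight-AllImageRefsInRelatedImages`, a minimal sketch of the `spec.relatedImages` section of a ClusterServiceVersion (the operator and image names are hypothetical):
+
+```yaml
+apiVersion: operators.coreos.com/v1alpha1
+kind: ClusterServiceVersion
+metadata:
+  name: example-operator.v1.0.0        # placeholder CSV name
+spec:
+  relatedImages:
+    - name: example-operator           # every image referenced by the CSV should appear here
+      image: quay.io/example/example-operator:v1.0.0
+    - name: example-sidecar
+      image: quay.io/example/example-sidecar:v1.0.0
+```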

#### preflight-BasedOnUbi

-|Property|Description|
-|---|---|
-|Unique ID|preflight-BasedOnUbi|
-|Description|Checking if the container's base image is based upon the Red Hat Universal Base Image (UBI)|
-|Suggested Remediation|Change the FROM directive in your Dockerfile or Containerfile, for the latest list of images and details refer to: https://catalog.redhat.com/software/base-images|
-|Best Practice Reference|No Doc Link|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Non-UBI base images may lack security updates, enterprise support, and compliance certifications required for production use.|
-|Tags|common,preflight|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|

+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | preflight-BasedOnUbi |
+| Description             | Checking if the container's base image is based upon the Red Hat Universal Base Image (UBI) |
+| Suggested Remediation   | Change the FROM directive in your Dockerfile or Containerfile. For the latest list of images and details, refer to: https://catalog.redhat.com/software/base-images |
+| Best Practice Reference | No Doc Link |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Non-UBI base images may lack security updates, enterprise support, and compliance certifications required for production use. |
+| Tags                    | common,preflight |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### preflight-BundleImageRefsAreCertified

-|Property|Description|
-|---|---|
-|Unique ID|preflight-BundleImageRefsAreCertified|
-|Description|Checking that all images referenced in the CSV are certified. Currently, this check is not enforced.|
-|Suggested Remediation|Ensure that any images referenced in the CSV, including the relatedImages section, have been certified.|
-|Best Practice Reference|No Doc Link|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Uncertified bundle image references can introduce security vulnerabilities and compatibility issues in production deployments.|
-|Tags|common,preflight|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|

+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | preflight-BundleImageRefsAreCertified |
+| Description             | Checking that all images referenced in the CSV are certified. Currently, this check is not enforced. |
+| Suggested Remediation   | Ensure that any images referenced in the CSV, including the relatedImages section, have been certified. |
+| Best Practice Reference | No Doc Link |
+| Exception Process       | There is no documented exception process for this. 
| +| Impact Statement | Uncertified bundle image references can introduce security vulnerabilities and compatibility issues in production deployments. | +| Tags | common,preflight | +| **Scenario** | **Optional/Mandatory** | +| Extended | Optional | +| Far-Edge | Optional | +| Non-Telco | Optional | +| Telco | Optional | #### preflight-DeployableByOLM -|Property|Description| -|---|---| -|Unique ID|preflight-DeployableByOLM| -|Description|Checking if the operator could be deployed by OLM| -|Suggested Remediation|Follow the guidelines on the operator-sdk website to learn how to package your operator https://sdk.operatorframework.io/docs/olm-integration/cli-overview/| -|Best Practice Reference|No Doc Link| -|Exception Process|There is no documented exception process for this.| -|Impact Statement|Operators not deployable by OLM cannot be properly managed, updated, or integrated into OpenShift lifecycle management.| -|Tags|common,preflight| -|**Scenario**|**Optional/Mandatory**| -|Extended|Optional| -|Far-Edge|Optional| -|Non-Telco|Optional| -|Telco|Optional| +| Property | Description | +| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | preflight-DeployableByOLM | +| Description | Checking if the operator could be deployed by OLM | +| Suggested Remediation | Follow the guidelines on the operator-sdk website to learn how to package your operator https://sdk.operatorframework.io/docs/olm-integration/cli-overview/ | +| Best Practice Reference | No Doc Link | +| Exception Process | There is no documented exception process for this. | +| Impact Statement | Operators not deployable by OLM cannot be properly managed, updated, or integrated into OpenShift lifecycle management. 
|
+| Tags                    | common,preflight |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### preflight-FollowsRestrictedNetworkEnablementGuidelines

-|Property|Description|
-|---|---|
-|Unique ID|preflight-FollowsRestrictedNetworkEnablementGuidelines|
-|Description|Checks for indicators that this bundle has implemented guidelines to indicate readiness for running in a disconnected cluster, or a cluster with a restricted network.|
-|Suggested Remediation|If consumers of your operator may need to do so on a restricted network, implement the guidelines outlined in OCP documentation: https://docs.redhat.com/en/documentation/openshift_container_platform/latest/html/disconnected_environments/olm-restricted-networks|
-|Best Practice Reference|No Doc Link|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Non-compliance with restricted network guidelines can prevent deployment in air-gapped environments and violate security policies.|
-|Tags|common,preflight|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|

+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | preflight-FollowsRestrictedNetworkEnablementGuidelines |
+| Description             | Checks for indicators that this bundle has implemented guidelines to indicate readiness for running in a disconnected cluster, or a cluster with a restricted network. |
+| Suggested Remediation   | If consumers of your operator may need to run it on a restricted network, implement the guidelines outlined in the OCP documentation: https://docs.redhat.com/en/documentation/openshift_container_platform/latest/html/disconnected_environments/olm-restricted-networks |
+| Best Practice Reference | No Doc Link |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Non-compliance with restricted network guidelines can prevent deployment in air-gapped environments and violate security policies. |
+| Tags                    | common,preflight |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### preflight-HasLicense

-|Property|Description|
-|---|---|
-|Unique ID|preflight-HasLicense|
-|Description|Checking if terms and conditions applicable to the software including open source licensing information are present. 
The license must be at /licenses| -|Suggested Remediation|Create a directory named /licenses and include all relevant licensing and/or terms and conditions as text file(s) in that directory.| -|Best Practice Reference|No Doc Link| -|Exception Process|There is no documented exception process for this.| -|Impact Statement|Missing license information can create legal compliance issues and prevent proper software asset management.| -|Tags|common,preflight| -|**Scenario**|**Optional/Mandatory**| -|Extended|Optional| -|Far-Edge|Optional| -|Non-Telco|Optional| -|Telco|Optional| +| Property | Description | +| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | preflight-HasLicense | +| Description | Checking if terms and conditions applicable to the software including open source licensing information are present. The license must be at /licenses | +| Suggested Remediation | Create a directory named /licenses and include all relevant licensing and/or terms and conditions as text file(s) in that directory. | +| Best Practice Reference | No Doc Link | +| Exception Process | There is no documented exception process for this. | +| Impact Statement | Missing license information can create legal compliance issues and prevent proper software asset management. | +| Tags | common,preflight | +| **Scenario** | **Optional/Mandatory** | +| Extended | Optional | +| Far-Edge | Optional | +| Non-Telco | Optional | +| Telco | Optional | #### preflight-HasModifiedFiles -|Property|Description| -|---|---| -|Unique ID|preflight-HasModifiedFiles| -|Description|Checks that no files installed via RPM in the base Red Hat layer have been modified| -|Suggested Remediation|Do not modify any files installed by RPM in the base Red Hat layer| -|Best Practice Reference|No Doc Link| -|Exception Process|There is no documented exception process for this.| -|Impact Statement|Modified files in containers can introduce security vulnerabilities, create inconsistent behavior, and violate immutable infrastructure principles.| -|Tags|common,preflight| -|**Scenario**|**Optional/Mandatory**| -|Extended|Optional| -|Far-Edge|Optional| -|Non-Telco|Optional| -|Telco|Optional| +| Property | Description | +| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | +| Unique ID | preflight-HasModifiedFiles | +| Description | Checks that no files installed via RPM in the base Red Hat layer have been modified | +| Suggested Remediation | Do not modify any files installed by RPM in the base Red Hat layer | +| Best Practice Reference | No Doc Link | +| Exception Process | There is no documented exception process for this. | +| Impact Statement | Modified files in containers can introduce security vulnerabilities, create inconsistent behavior, and violate immutable infrastructure principles. 
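|

+For the `preflight-HasLicense` check above, a minimal Containerfile sketch that places license texts where the check expects them (the base image and source path are illustrative):
+
+```dockerfile
+FROM registry.access.redhat.com/ubi9/ubi-minimal:latest
+# Ship the license and terms-and-conditions text files at /licenses
+COPY licenses/ /licenses/
+```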
+| Tags                    | common,preflight |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### preflight-HasNoProhibitedLabels

-|Property|Description|
-|---|---|
-|Unique ID|preflight-HasNoProhibitedLabels|
-|Description|Checking if the labels (name, vendor, maintainer) violate Red Hat trademark.|
-|Suggested Remediation|Ensure the name, vendor, and maintainer label on your image do not violate the Red Hat trademark.|
-|Best Practice Reference|No Doc Link|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Misuse of Red Hat trademarks in name, vendor, or maintainer labels creates legal and compliance risks that can block certification and publication.|
-|Tags|common,preflight|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|

+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | preflight-HasNoProhibitedLabels |
+| Description             | Checking if the labels (name, vendor, maintainer) violate the Red Hat trademark. |
+| Suggested Remediation   | Ensure the name, vendor, and maintainer labels on your image do not violate the Red Hat trademark. |
+| Best Practice Reference | No Doc Link |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Misuse of Red Hat trademarks in name, vendor, or maintainer labels creates legal and compliance risks that can block certification and publication. |
+| Tags                    | common,preflight |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### preflight-HasNoProhibitedPackages

-|Property|Description|
-|---|---|
-|Unique ID|preflight-HasNoProhibitedPackages|
-|Description|Checks to ensure that the image in use does not include prohibited packages, such as Red Hat Enterprise Linux (RHEL) kernel packages.|
-|Suggested Remediation|Remove any RHEL packages that are not distributable outside of UBI|
-|Best Practice Reference|No Doc Link|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Prohibited packages can introduce security vulnerabilities, licensing issues, and compliance violations.|
-|Tags|common,preflight|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|

+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | preflight-HasNoProhibitedPackages |
+| Description             | Checks to ensure that the image in use does not include prohibited packages, such as Red Hat Enterprise Linux (RHEL) kernel packages. |
+| Suggested Remediation   | Remove any RHEL packages that are not distributable outside of UBI |
+| Best Practice Reference | No Doc Link |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Prohibited packages can introduce security vulnerabilities, licensing issues, and compliance violations. |
+| Tags                    | common,preflight |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### preflight-HasProhibitedContainerName

-|Property|Description|
-|---|---|
-|Unique ID|preflight-HasProhibitedContainerName|
-|Description|Checking if the container-name violates Red Hat trademark.|
-|Suggested Remediation|Update container-name ie (quay.io/repo-name/container-name) to not violate Red Hat trademark.|
-|Best Practice Reference|No Doc Link|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Prohibited container names can cause conflicts with system components and violate naming conventions.|
-|Tags|common,preflight|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|

+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | preflight-HasProhibitedContainerName |
+| Description             | Checking if the container name violates the Red Hat trademark. |
+| Suggested Remediation   | Update the container name (the container-name segment of quay.io/repo-name/container-name) so that it does not violate the Red Hat trademark. |
+| Best Practice Reference | No Doc Link |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Prohibited container names can cause conflicts with system components and violate naming conventions. |
+| Tags                    | common,preflight |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### preflight-HasRequiredLabel

-|Property|Description|
-|---|---|
-|Unique ID|preflight-HasRequiredLabel|
-|Description|Checking if the required labels (name, vendor, version, release, summary, description, maintainer) are present in the container metadata|
-|Suggested Remediation|Add the following labels to your Dockerfile or Containerfile: name, vendor, version, release, summary, description, maintainer.|
-|Best Practice Reference|No Doc Link|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Missing required labels prevent proper metadata management and can cause deployment and management issues.|
-|Tags|common,preflight|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|

+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | preflight-HasRequiredLabel |
+| Description             | Checking if the required labels (name, vendor, version, release, summary, description, maintainer) are present in the container metadata |
+| Suggested Remediation   | Add the following labels to your Dockerfile or Containerfile: name, vendor, version, release, summary, description, maintainer. |
+| Best Practice Reference | No Doc Link |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Missing required labels prevent proper metadata management and can cause deployment and management issues. |
+| Tags                    | common,preflight |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |
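+For the `preflight-HasRequiredLabel` check above, a minimal Containerfile sketch adding the seven required labels (all label values are placeholders):
+
+```dockerfile
+FROM registry.access.redhat.com/ubi9/ubi-minimal:latest
+# Metadata labels required by preflight-HasRequiredLabel
+LABEL name="example-app" \
+      vendor="Example Corp" \
+      version="1.0.0" \
+      release="1" \
+      summary="Example application" \
+      description="A longer description of the example application" \
+      maintainer="maintainers@example.com"
+```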

#### preflight-HasUniqueTag

-|Property|Description|
-|---|---|
-|Unique ID|preflight-HasUniqueTag|
-|Description|Checking if container has a tag other than 'latest', so that the image can be uniquely identified.|
-|Suggested Remediation|Add a tag to your image. Consider using Semantic Versioning. https://semver.org/|
-|Best Practice Reference|No Doc Link|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Non-unique tags can cause version conflicts and deployment inconsistencies, making rollbacks and troubleshooting difficult.|
-|Tags|common,preflight|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|

+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | preflight-HasUniqueTag |
+| Description             | Checking if the container has a tag other than 'latest', so that the image can be uniquely identified. |
+| Suggested Remediation   | Add a tag to your image. Consider using Semantic Versioning. https://semver.org/ |
+| Best Practice Reference | No Doc Link |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Non-unique tags can cause version conflicts and deployment inconsistencies, making rollbacks and troubleshooting difficult. |
+| Tags                    | common,preflight |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### preflight-LayerCountAcceptable

-|Property|Description|
-|---|---|
-|Unique ID|preflight-LayerCountAcceptable|
-|Description|Checking if container has less than 40 layers. Too many layers within the container images can degrade container performance.|
-|Suggested Remediation|Optimize your Dockerfile to consolidate and minimize the number of layers. Each RUN command will produce a new layer. Try combining RUN commands using && where possible.|
-|Best Practice Reference|No Doc Link|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Excessive image layers can cause poor performance, increased storage usage, and longer deployment times.|
-|Tags|common,preflight|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|

+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | preflight-LayerCountAcceptable |
+| Description             | Checking if the container has fewer than 40 layers. Too many layers within the container image can degrade container performance. |
+| Suggested Remediation   | Optimize your Dockerfile to consolidate and minimize the number of layers. Each RUN command will produce a new layer. Try combining RUN commands using && where possible. |
+| Best Practice Reference | No Doc Link |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Excessive image layers can cause poor performance, increased storage usage, and longer deployment times. |
+| Tags                    | common,preflight |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |
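+For the `preflight-LayerCountAcceptable` check above, a sketch of the suggested remediation: combining RUN commands so several steps produce a single layer (the package names are illustrative):
+
+```dockerfile
+FROM registry.access.redhat.com/ubi9/ubi-minimal:latest
+# One RUN instruction (one layer) instead of a separate RUN per command
+RUN microdnf install -y curl jq && \
+    microdnf clean all
+```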

#### preflight-RequiredAnnotations

-|Property|Description|
-|---|---|
-|Unique ID|preflight-RequiredAnnotations|
-|Description|Checks that the CSV has all of the required feature annotations.|
-|Suggested Remediation|Add all of the required annotations, and make sure the value is set to either 'true' or 'false'|
-|Best Practice Reference|No Doc Link|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Missing required annotations can prevent proper operator lifecycle management and cause deployment failures.|
-|Tags|common,preflight|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|

+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | preflight-RequiredAnnotations |
+| Description             | Checks that the CSV has all of the required feature annotations. |
+| Suggested Remediation   | Add all of the required annotations, and make sure the value is set to either 'true' or 'false' |
+| Best Practice Reference | No Doc Link |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Missing required annotations can prevent proper operator lifecycle management and cause deployment failures. |
+| Tags                    | common,preflight |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### preflight-RunAsNonRoot

-|Property|Description|
-|---|---|
-|Unique ID|preflight-RunAsNonRoot|
-|Description|Checking if container runs as the root user because a container that does not specify a non-root user will fail the automatic certification, and will be subject to a manual review before the container can be approved for publication|
-|Suggested Remediation|Indicate a specific USER in the dockerfile or containerfile|
-|Best Practice Reference|No Doc Link|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Running containers as root increases the blast radius of security vulnerabilities and can lead to full host compromise if containers are breached.|
-|Tags|common,preflight|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|

+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | preflight-RunAsNonRoot |
+| Description             | Checking if the container runs as the root user. A container that does not specify a non-root user will fail the automatic certification and will be subject to a manual review before it can be approved for publication. |
+| Suggested Remediation   | Indicate a specific USER in the Dockerfile or Containerfile |
+| Best Practice Reference | No Doc Link |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Running containers as root increases the blast radius of security vulnerabilities and can lead to full host compromise if containers are breached. |
+| Tags                    | common,preflight |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |
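+For the `preflight-RunAsNonRoot` check above, a minimal Containerfile sketch that declares a non-root user (the UID and user name are arbitrary examples):
+
+```dockerfile
+FROM registry.access.redhat.com/ubi9/ubi:latest
+# Create an unprivileged user and make it the image default
+RUN useradd --uid 1001 --no-create-home appuser
+USER 1001
+```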

#### preflight-ScorecardBasicSpecCheck

-|Property|Description|
-|---|---|
-|Unique ID|preflight-ScorecardBasicSpecCheck|
-|Description|Check to make sure that all CRs have a spec block.|
-|Suggested Remediation|Make sure that all CRs have a spec block|
-|Best Practice Reference|No Doc Link|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Failing basic scorecard checks indicates fundamental operator implementation issues that can cause runtime failures.|
-|Tags|common,preflight|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|

+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | preflight-ScorecardBasicSpecCheck |
+| Description             | Checks to make sure that all CRs have a spec block. |
+| Suggested Remediation   | Make sure that all CRs have a spec block |
+| Best Practice Reference | No Doc Link |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Failing basic scorecard checks indicates fundamental operator implementation issues that can cause runtime failures. |
+| Tags                    | common,preflight |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### preflight-ScorecardOlmSuiteCheck

-|Property|Description|
-|---|---|
-|Unique ID|preflight-ScorecardOlmSuiteCheck|
-|Description|Operator-sdk scorecard OLM Test Suite Check|
-|Suggested Remediation|See scorecard output for details, artifacts/operator_bundle_scorecard_OlmSuiteCheck.json|
-|Best Practice Reference|No Doc Link|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Failing OLM suite checks indicates operator lifecycle management issues that can prevent proper installation and updates.|
-|Tags|common,preflight|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|

+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | preflight-ScorecardOlmSuiteCheck |
+| Description             | Operator-sdk scorecard OLM Test Suite Check |
+| Suggested Remediation   | See the scorecard output for details: artifacts/operator_bundle_scorecard_OlmSuiteCheck.json |
+| Best Practice Reference | No Doc Link |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Failing OLM suite checks indicates operator lifecycle management issues that can prevent proper installation and updates. |
+| Tags                    | common,preflight |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |
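+For `preflight-ScorecardBasicSpecCheck` above, every custom resource shipped with the bundle should carry a `spec` block; a minimal sketch (the group, kind, and fields are hypothetical):
+
+```yaml
+apiVersion: example.com/v1alpha1
+kind: ExampleApp
+metadata:
+  name: exampleapp-sample
+spec:
+  replicas: 1   # even a minimal spec block satisfies the check
+```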

#### preflight-SecurityContextConstraintsInCSV

-|Property|Description|
-|---|---|
-|Unique ID|preflight-SecurityContextConstraintsInCSV|
-|Description|Evaluates the csv and logs a message if a non default security context constraint is needed by the operator|
-|Suggested Remediation|If no scc is detected the default restricted scc will be used.|
-|Best Practice Reference|No Doc Link|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Incorrect SCC definitions in CSV can cause security policy violations and deployment failures.|
-|Tags|common,preflight|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|

+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | preflight-SecurityContextConstraintsInCSV |
+| Description             | Evaluates the CSV and logs a message if a non-default security context constraint is needed by the operator |
+| Suggested Remediation   | If no SCC is detected, the default restricted SCC will be used. |
+| Best Practice Reference | No Doc Link |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Incorrect SCC definitions in the CSV can cause security policy violations and deployment failures. |
+| Tags                    | common,preflight |
+| **Scenario**            | **Optional/Mandatory** |
+| Extended                | Optional |
+| Far-Edge                | Optional |
+| Non-Telco               | Optional |
+| Telco                   | Optional |

#### preflight-ValidateOperatorBundle

-|Property|Description|
-|---|---|
-|Unique ID|preflight-ValidateOperatorBundle|
-|Description|Validating Bundle image that checks if it can validate the content and format of the operator bundle|
-|Suggested Remediation|Valid bundles are defined by bundle spec, so make sure that this bundle conforms to that spec. More Information: https://github.com/operator-framework/operator-registry/blob/master/docs/design/operator-bundle.md|
-|Best Practice Reference|No Doc Link|
-|Exception Process|There is no documented exception process for this.|
-|Impact Statement|Invalid operator bundles can cause deployment failures, update issues, and operational instability.|
-|Tags|common,preflight|
-|**Scenario**|**Optional/Mandatory**|
-|Extended|Optional|
-|Far-Edge|Optional|
-|Non-Telco|Optional|
-|Telco|Optional|

+| Property                | Description |
+| ----------------------- | ----------- |
+| Unique ID               | preflight-ValidateOperatorBundle |
+| Description             | Validates the content and format of the operator bundle image |
+| Suggested Remediation   | Valid bundles are defined by the bundle spec, so make sure that this bundle conforms to that spec. More information: https://github.com/operator-framework/operator-registry/blob/master/docs/design/operator-bundle.md |
+| Best Practice Reference | No Doc Link |
+| Exception Process       | There is no documented exception process for this. |
+| Impact Statement        | Invalid operator bundles can cause deployment failures, update issues, and operational instability. 
| +| Tags | common,preflight | +| **Scenario** | **Optional/Mandatory** | +| Extended | Optional | +| Far-Edge | Optional | +| Non-Telco | Optional | +| Telco | Optional | ## Security Context Categories diff --git a/cmd/certsuite/claim/compare/testdata/claim_access_control.json b/cmd/certsuite/claim/compare/testdata/claim_access_control.json index 3c24ef024..27d1ea7c4 100644 --- a/cmd/certsuite/claim/compare/testdata/claim_access_control.json +++ b/cmd/certsuite/claim/compare/testdata/claim_access_control.json @@ -1,7 +1,10 @@ { "claim": { - "configurations" : { - "AbnormalEvents" : ["EVENT2", "EVENT3"], + "configurations": { + "AbnormalEvents": [ + "EVENT2", + "EVENT3" + ], "Config": { "acceptedKernelTaints": [ { @@ -8276,2435 +8279,2435 @@ }, "results": { "access-control-bpf-capability-check": { - "capturedTestOutput": "Non compliant [BPF container: xdp-c pod: xdp ns: certsuite \u0026Capabilities{Add:[BPF PERFMON NET_ADMIN],Drop:[],}] capability detected in container %!s(MISSING). All container caps: %!s(MISSING)\n{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"SCC Capability\"],\"ObjectFieldsValues\":[\"Non compliant capability detected in container\",\"certsuite\",\"xdp\",\"xdp-c\",\"BPF\"]}]}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "No Doc Link - Telco", - "description": "Ensures that containers do not use BFP capability. CNF should avoid loading eBPF filters", - "exceptionProcess": "Exception can be considered. 
Must identify which container requires the capability and detail why.", - "remediation": "Remove the following capability from the container/pod definitions: BPF" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 172850, - "endTime": "2023-09-04 09:18:50.568408558 -0500 CDT m=+20.103332734", - "failureLineContent": "\t\tfail(string(bytes))", - "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:367", - "skipReason": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"SCC Capability\"],\"ObjectFieldsValues\":[\"Non compliant capability detected in container\",\"certsuite\",\"xdp\",\"xdp-c\",\"BPF\"]}]}", - "startTime": "2023-09-04 09:18:50.568235709 -0500 CDT m=+20.103159884", - "state": "failed", - "testID": { - "id": "access-control-bpf-capability-check", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "Non compliant [BPF container: xdp-c pod: xdp ns: certsuite \u0026Capabilities{Add:[BPF PERFMON NET_ADMIN],Drop:[],}] capability detected in container %!s(MISSING). 
All container caps: %!s(MISSING)\n{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"SCC Capability\"],\"ObjectFieldsValues\":[\"Non compliant capability detected in container\",\"certsuite\",\"xdp\",\"xdp-c\",\"BPF\"]}]}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "No Doc Link - Telco", + "description": "Ensures that containers do not use BFP capability. CNF should avoid loading eBPF filters", + "exceptionProcess": "Exception can be considered. Must identify which container requires the capability and detail why.", + "remediation": "Remove the following capability from the container/pod definitions: BPF" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 172850, + "endTime": "2023-09-04 09:18:50.568408558 -0500 CDT m=+20.103332734", + "failureLineContent": "\t\tfail(string(bytes))", + "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:367", + "skipReason": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"SCC Capability\"],\"ObjectFieldsValues\":[\"Non compliant capability detected in container\",\"certsuite\",\"xdp\",\"xdp-c\",\"BPF\"]}]}", + "startTime": "2023-09-04 09:18:50.568235709 -0500 CDT m=+20.103159884", + "state": "failed", + 
"testID": { + "id": "access-control-bpf-capability-check", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-cluster-role-bindings": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using a cluster role binding\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using a cluster role binding\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using a cluster role binding\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using a cluster role binding\",\"certsuite\",\"test-765d6b8dcf-s768n\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using a cluster role binding\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-security-rbac", - "description": "Tests that a Pod does not specify ClusterRoleBindings.", - "exceptionProcess": "Exception possible only for workloads that's cluster wide in nature and absolutely needs cluster level roles \u0026 role bindings", - "remediation": "In most cases, Pod's should not have ClusterRoleBindings. The suggested remediation is to remove the need for ClusterRoleBindings, if possible. Cluster roles and cluster role bindings discouraged unless absolutely needed by CNF (often reserved for cluster admin only)." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 357044, - "endTime": "2023-09-04 09:18:50.664570781 -0500 CDT m=+20.199494961", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.664213742 -0500 CDT m=+20.199137917", - "state": "passed", - "testID": { - "id": "access-control-cluster-role-bindings", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using a cluster role binding\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using a cluster role binding\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using a cluster role binding\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using a cluster role binding\",\"certsuite\",\"test-765d6b8dcf-s768n\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using a cluster role binding\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-security-rbac", + "description": "Tests that a Pod does not specify ClusterRoleBindings.", + "exceptionProcess": "Exception possible only for workloads that's cluster wide in nature and absolutely needs cluster level roles \u0026 role bindings", + "remediation": "In most cases, Pod's should not have ClusterRoleBindings. The suggested remediation is to remove the need for ClusterRoleBindings, if possible. Cluster roles and cluster role bindings discouraged unless absolutely needed by CNF (often reserved for cluster admin only)." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 357044, + "endTime": "2023-09-04 09:18:50.664570781 -0500 CDT m=+20.199494961", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.664213742 -0500 CDT m=+20.199137917", + "state": "passed", + "testID": { + "id": "access-control-cluster-role-bindings", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-container-host-port": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Host port is not configured\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Host port is not configured\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Host port is not configured\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Host port is not configured\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-accessing-resource-on-host", - "description": "Verifies if containers define a hostPort.", - "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", - "remediation": "Remove hostPort configuration from the container. CNF should avoid accessing host resources - containers should not configure HostPort." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 59399, - "endTime": "2023-09-04 09:18:50.568708574 -0500 CDT m=+20.103632750", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.568649176 -0500 CDT m=+20.103573351", - "state": "passed", - "testID": { - "id": "access-control-container-host-port", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Host port is not configured\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Host port is not configured\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Host port is not configured\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Host port is not configured\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-accessing-resource-on-host", + "description": "Verifies if containers define a hostPort.", + "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", + "remediation": "Remove hostPort configuration from the container. CNF should avoid accessing host resources - containers should not configure HostPort." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 59399, + "endTime": "2023-09-04 09:18:50.568708574 -0500 CDT m=+20.103632750", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.568649176 -0500 CDT m=+20.103573351", + "state": "passed", + "testID": { + "id": "access-control-container-host-port", + "suite": "access-control", + "tags": "common" + } + }, "access-control-crd-roles": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-custom-role-to-access-application-crds", - "description": "If an application creates CRDs it must supply a role to access those CRDs and no other API resources/permission. This test checks that there is at least one role present in each namespaces under test that only refers to CRDs under test.", - "exceptionProcess": "No exception needed for optional/extended tests.", - "remediation": "Roles providing access to CRDs should not refer to any other api or resources. 
Change the generation of the CRD role accordingly" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 249071, - "endTime": "2023-09-04 09:19:06.658058151 -0500 CDT m=+36.192982330", - "failureLineContent": "\t\tginkgo.Skip(\"No role contains rules that apply to at least one CRD under test\")", - "failureLocation": "/home/greyerof/github/certsuite/certsuite/accesscontrol/suite.go:933", - "skipReason": "No role contains rules that apply to at least one CRD under test", - "startTime": "2023-09-04 09:19:06.657809085 -0500 CDT m=+36.192733259", - "state": "skipped", - "testID": { - "id": "access-control-crd-roles", - "suite": "access-control", - "tags": "extended" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-custom-role-to-access-application-crds", + "description": "If an application creates CRDs it must supply a role to access those CRDs and no other API resources/permission. This test checks that there is at least one role present in each namespaces under test that only refers to CRDs under test.", + "exceptionProcess": "No exception needed for optional/extended tests.", + "remediation": "Roles providing access to CRDs should not refer to any other api or resources. Change the generation of the CRD role accordingly" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 249071, + "endTime": "2023-09-04 09:19:06.658058151 -0500 CDT m=+36.192982330", + "failureLineContent": "\t\tginkgo.Skip(\"No role contains rules that apply to at least one CRD under test\")", + "failureLocation": "/home/greyerof/github/certsuite/certsuite/accesscontrol/suite.go:933", + "skipReason": "No role contains rules that apply to at least one CRD under test", + "startTime": "2023-09-04 09:19:06.657809085 -0500 CDT m=+36.192733259", + "state": "skipped", + "testID": { + "id": "access-control-crd-roles", + "suite": "access-control", + "tags": "extended" + } + }, "access-control-ipc-lock-capability-check": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"xdp\",\"xdp-c\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - 
"catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipc_lock", - "description": "Ensures that containers do not use IPC_LOCK capability. CNF should avoid accessing host resources - spec.HostIpc should be false.", - "exceptionProcess": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why.", - "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 40401, - "endTime": "2023-09-04 09:18:50.568185748 -0500 CDT m=+20.103109922", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.568145348 -0500 CDT m=+20.103069521", - "state": "passed", - "testID": { - "id": "access-control-ipc-lock-capability-check", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"xdp\",\"xdp-c\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipc_lock", + "description": "Ensures that containers do not use IPC_LOCK capability. CNF should avoid accessing host resources - spec.HostIpc should be false.", + "exceptionProcess": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why.", + "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 40401, + "endTime": "2023-09-04 09:18:50.568185748 -0500 CDT m=+20.103109922", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.568145348 -0500 CDT m=+20.103069521", + "state": "passed", + "testID": { + "id": "access-control-ipc-lock-capability-check", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-namespace": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Namespace\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\"],\"ObjectFieldsValues\":[\"Namespace has valid prefix\",\"certsuite\"]},{\"ObjectType\":\"Namespace\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\"],\"ObjectFieldsValues\":[\"Namespace has valid prefix\",\"certsuite\"]},{\"ObjectType\":\"Namespace\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\"],\"ObjectFieldsValues\":[\"Namespace has valid prefix\",\"certsuite\"]},{\"ObjectType\":\"Namespace\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\"],\"ObjectFieldsValues\":[\"Namespace has valid prefix\",\"certsuite\"]},{\"ObjectType\":\"Namespace\",\"ObjectFieldsKeys\":[\"Reason For Compliance\"],\"ObjectFieldsValues\":[\"CRs are in the configured namespaces\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs", - "description": "Tests that all CNF's resources (PUTs and CRs) belong to valid namespaces. A valid namespace meets\nthe following conditions: (1) It was declared in the yaml config file under the targetNameSpaces\ntag. (2) It does not have any of the following prefixes: default, openshift-, istio- and aspenmesh-", - "exceptionProcess": "No exceptions", - "remediation": "Ensure that your CNF utilizes namespaces declared in the yaml config file. Additionally, the namespaces should not start with \"default, openshift-, istio- or aspenmesh-\"." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 93706789, - "endTime": "2023-09-04 09:18:50.662756079 -0500 CDT m=+20.197680257", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.569049294 -0500 CDT m=+20.103973468", - "state": "passed", - "testID": { - "id": "access-control-namespace", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Namespace\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\"],\"ObjectFieldsValues\":[\"Namespace has valid prefix\",\"certsuite\"]},{\"ObjectType\":\"Namespace\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\"],\"ObjectFieldsValues\":[\"Namespace has valid prefix\",\"certsuite\"]},{\"ObjectType\":\"Namespace\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\"],\"ObjectFieldsValues\":[\"Namespace has valid prefix\",\"certsuite\"]},{\"ObjectType\":\"Namespace\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\"],\"ObjectFieldsValues\":[\"Namespace has valid prefix\",\"certsuite\"]},{\"ObjectType\":\"Namespace\",\"ObjectFieldsKeys\":[\"Reason For Compliance\"],\"ObjectFieldsValues\":[\"CRs are in the configured namespaces\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs", + "description": "Tests that all CNF's resources (PUTs and CRs) belong to valid namespaces. A valid namespace meets\nthe following conditions: (1) It was declared in the yaml config file under the targetNameSpaces\ntag. (2) It does not have any of the following prefixes: default, openshift-, istio- and aspenmesh-", + "exceptionProcess": "No exceptions", + "remediation": "Ensure that your CNF utilizes namespaces declared in the yaml config file. Additionally, the namespaces should not start with \"default, openshift-, istio- or aspenmesh-\"." 
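The description above refers to namespaces declared under the targetNameSpaces tag of the YAML config file. A minimal sketch of that stanza, assuming the usual tnf_config.yml layout (the list-of-name-objects structure is an assumption), would be:

```yaml
# Sketch of the config stanza access-control-namespace reads
# (tnf_config.yml file name and structure are assumptions).
targetNameSpaces:
  - name: certsuite   # must not start with default, openshift-, istio- or aspenmesh-
```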
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 93706789, + "endTime": "2023-09-04 09:18:50.662756079 -0500 CDT m=+20.197680257", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.569049294 -0500 CDT m=+20.103973468", + "state": "passed", + "testID": { + "id": "access-control-namespace", + "suite": "access-control", + "tags": "common" + } + }, "access-control-namespace-resource-quota": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is running in a namespace that has a ResourceQuota applied\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is running in a namespace that has a ResourceQuota applied\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is running in a namespace that has a ResourceQuota applied\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is running in a namespace that has a ResourceQuota applied\",\"certsuite\",\"test-765d6b8dcf-s768n\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is running in a namespace that has a ResourceQuota applied\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-memory-allocation", - "description": "Checks to see if CNF workload pods are running in namespaces that have resource quotas applied.", - "exceptionProcess": "No exception needed for optional/extended tests.", - "remediation": "Apply a ResourceQuota to the namespace your CNF is running in. The CNF namespace should have resource quota defined." 
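As a sketch of the remediation just quoted, a ResourceQuota in the namespace under test is enough to satisfy access-control-namespace-resource-quota; the quota name and the limits below are illustrative values, not recommendations:

```yaml
# Illustrative ResourceQuota for access-control-namespace-resource-quota.
apiVersion: v1
kind: ResourceQuota
metadata:
  name: cnf-quota              # hypothetical name
  namespace: certsuite
spec:
  hard:
    requests.cpu: "4"
    requests.memory: 8Gi
    limits.cpu: "8"
    limits.memory: 16Gi
```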
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 75868, - "endTime": "2023-09-04 09:18:58.81676038 -0500 CDT m=+28.351684554", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:58.816684513 -0500 CDT m=+28.351608686", - "state": "passed", - "testID": { - "id": "access-control-namespace-resource-quota", - "suite": "access-control", - "tags": "extended" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is running in a namespace that has a ResourceQuota applied\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is running in a namespace that has a ResourceQuota applied\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is running in a namespace that has a ResourceQuota applied\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is running in a namespace that has a ResourceQuota applied\",\"certsuite\",\"test-765d6b8dcf-s768n\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is running in a namespace that has a ResourceQuota applied\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-memory-allocation", + "description": "Checks to see if CNF workload pods are running in namespaces that have resource quotas applied.", + "exceptionProcess": "No exception needed for optional/extended tests.", + "remediation": "Apply a ResourceQuota to the namespace your CNF is running in. The CNF namespace should have resource quota defined." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 75868, + "endTime": "2023-09-04 09:18:58.81676038 -0500 CDT m=+28.351684554", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:58.816684513 -0500 CDT m=+28.351608686", + "state": "passed", + "testID": { + "id": "access-control-namespace-resource-quota", + "suite": "access-control", + "tags": "extended" + } + }, "access-control-net-admin-capability-check": { - "capturedTestOutput": "Non compliant [NET_ADMIN container: test pod: test-765d6b8dcf-gbvsd ns: certsuite \u0026Capabilities{Add:[NET_ADMIN],Drop:[],}] capability detected in container %!s(MISSING). All container caps: %!s(MISSING)\nNon compliant [NET_ADMIN container: test pod: test-765d6b8dcf-s768n ns: certsuite \u0026Capabilities{Add:[NET_ADMIN],Drop:[],}] capability detected in container %!s(MISSING). All container caps: %!s(MISSING)\nNon compliant [NET_ADMIN container: xdp-c pod: xdp ns: certsuite \u0026Capabilities{Add:[BPF PERFMON NET_ADMIN],Drop:[],}] capability detected in container %!s(MISSING). 
All container caps: %!s(MISSING)\n{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-1\",\"test\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"SCC Capability\"],\"ObjectFieldsValues\":[\"Non compliant capability detected in container\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\",\"NET_ADMIN\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"SCC Capability\"],\"ObjectFieldsValues\":[\"Non compliant capability detected in container\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\",\"NET_ADMIN\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"SCC Capability\"],\"ObjectFieldsValues\":[\"Non compliant capability detected in container\",\"certsuite\",\"xdp\",\"xdp-c\",\"NET_ADMIN\"]}]}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-net_admin", - "description": "Ensures that containers do not use NET_ADMIN capability. Note: this test also ensures iptables and nftables are not configured by CNF pods:\n- NET_ADMIN and NET_RAW are required to modify nftables (namespaced) which is not desired inside pods.\nnftables should be configured by an administrator outside the scope of the CNF. nftables are usually configured\nby operators, for instance the Performance Addon Operator (PAO) or istio.\n- Privileged container are required to modify host iptables, which is not safe to perform inside pods. nftables\nshould be configured by an administrator outside the scope of the CNF. iptables are usually configured by operators,\nfor instance the Performance Addon Operator (PAO) or istio.", - "exceptionProcess": "Exception will be considered for user plane or networking functions (e.g. SR-IOV, Multicast). Must identify which container requires the capability and detail why.", - "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 232836, - "endTime": "2023-09-04 09:18:50.56793244 -0500 CDT m=+20.102856615", - "failureLineContent": "\t\tfail(string(bytes))", - "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:367", - "skipReason": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-1\",\"test\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"SCC Capability\"],\"ObjectFieldsValues\":[\"Non compliant capability detected in container\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\",\"NET_ADMIN\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"SCC Capability\"],\"ObjectFieldsValues\":[\"Non compliant capability detected in container\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\",\"NET_ADMIN\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"SCC Capability\"],\"ObjectFieldsValues\":[\"Non compliant capability detected in container\",\"certsuite\",\"xdp\",\"xdp-c\",\"NET_ADMIN\"]}]}", - "startTime": "2023-09-04 09:18:50.567699604 -0500 CDT m=+20.102623779", - "state": "failed", - "testID": { - "id": "access-control-net-admin-capability-check", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "Non compliant [NET_ADMIN container: test pod: test-765d6b8dcf-gbvsd ns: certsuite \u0026Capabilities{Add:[NET_ADMIN],Drop:[],}] capability detected in container %!s(MISSING). All container caps: %!s(MISSING)\nNon compliant [NET_ADMIN container: test pod: test-765d6b8dcf-s768n ns: certsuite \u0026Capabilities{Add:[NET_ADMIN],Drop:[],}] capability detected in container %!s(MISSING). All container caps: %!s(MISSING)\nNon compliant [NET_ADMIN container: xdp-c pod: xdp ns: certsuite \u0026Capabilities{Add:[BPF PERFMON NET_ADMIN],Drop:[],}] capability detected in container %!s(MISSING). 
All container caps: %!s(MISSING)\n{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-1\",\"test\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"SCC Capability\"],\"ObjectFieldsValues\":[\"Non compliant capability detected in container\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\",\"NET_ADMIN\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"SCC Capability\"],\"ObjectFieldsValues\":[\"Non compliant capability detected in container\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\",\"NET_ADMIN\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"SCC Capability\"],\"ObjectFieldsValues\":[\"Non compliant capability detected in container\",\"certsuite\",\"xdp\",\"xdp-c\",\"NET_ADMIN\"]}]}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-net_admin", + "description": "Ensures that containers do not use NET_ADMIN capability. Note: this test also ensures iptables and nftables are not configured by CNF pods:\n- NET_ADMIN and NET_RAW are required to modify nftables (namespaced) which is not desired inside pods.\nnftables should be configured by an administrator outside the scope of the CNF. nftables are usually configured\nby operators, for instance the Performance Addon Operator (PAO) or istio.\n- Privileged container are required to modify host iptables, which is not safe to perform inside pods. nftables\nshould be configured by an administrator outside the scope of the CNF. iptables are usually configured by operators,\nfor instance the Performance Addon Operator (PAO) or istio.", + "exceptionProcess": "Exception will be considered for user plane or networking functions (e.g. SR-IOV, Multicast). Must identify which container requires the capability and detail why.", + "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why." 
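This result fails because the xdp pod adds BPF, PERFMON and NET_ADMIN, as its captured output shows; the access-control-net-raw-capability-check entry that follows applies the same rule to NET_RAW. A compliant sketch of the offending container (the image is a placeholder) drops both capabilities along with everything else:

```yaml
# Compliant shape for access-control-net-admin/net-raw-capability-check.
apiVersion: v1
kind: Pod
metadata:
  name: xdp                    # pod name taken from the results above
  namespace: certsuite
spec:
  containers:
    - name: xdp-c
      image: registry.example.com/xdp:latest   # placeholder image
      securityContext:
        capabilities:
          drop: ["ALL"]        # removes NET_ADMIN and NET_RAW
          # the failing run instead had: add: ["BPF", "PERFMON", "NET_ADMIN"]
```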
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 232836, + "endTime": "2023-09-04 09:18:50.56793244 -0500 CDT m=+20.102856615", + "failureLineContent": "\t\tfail(string(bytes))", + "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:367", + "skipReason": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-1\",\"test\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"SCC Capability\"],\"ObjectFieldsValues\":[\"Non compliant capability detected in container\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\",\"NET_ADMIN\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"SCC Capability\"],\"ObjectFieldsValues\":[\"Non compliant capability detected in container\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\",\"NET_ADMIN\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"SCC Capability\"],\"ObjectFieldsValues\":[\"Non compliant capability detected in container\",\"certsuite\",\"xdp\",\"xdp-c\",\"NET_ADMIN\"]}]}", + "startTime": "2023-09-04 09:18:50.567699604 -0500 CDT m=+20.102623779", + "state": "failed", + "testID": { + "id": "access-control-net-admin-capability-check", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-net-raw-capability-check": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"xdp\",\"xdp-c\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-user-plane-cnfs", - "description": "Ensures that containers do not use 
NET_RAW capability. Note: this test also ensures iptables and nftables are not configured by CNF pods:\n- NET_ADMIN and NET_RAW are required to modify nftables (namespaced) which is not desired inside pods.\nnftables should be configured by an administrator outside the scope of the CNF. nftables are usually configured\nby operators, for instance the Performance Addon Operator (PAO) or istio.\n- Privileged container are required to modify host iptables, which is not safe to perform inside pods. nftables\nshould be configured by an administrator outside the scope of the CNF. iptables are usually configured by operators,\nfor instance the Performance Addon Operator (PAO) or istio.", - "exceptionProcess": "Exception will be considered for user plane or networking functions. Must identify which container requires the capability and detail why.", - "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 61098, - "endTime": "2023-09-04 09:18:50.568101953 -0500 CDT m=+20.103026127", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.568040854 -0500 CDT m=+20.102965029", - "state": "passed", - "testID": { - "id": "access-control-net-raw-capability-check", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"xdp\",\"xdp-c\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-user-plane-cnfs", + "description": "Ensures that containers do not use NET_RAW capability. Note: this test also ensures iptables and nftables are not configured by CNF pods:\n- NET_ADMIN and NET_RAW are required to modify nftables (namespaced) which is not desired inside pods.\nnftables should be configured by an administrator outside the scope of the CNF. 
nftables are usually configured\nby operators, for instance the Performance Addon Operator (PAO) or istio.\n- Privileged container are required to modify host iptables, which is not safe to perform inside pods. nftables\nshould be configured by an administrator outside the scope of the CNF. iptables are usually configured by operators,\nfor instance the Performance Addon Operator (PAO) or istio.", + "exceptionProcess": "Exception will be considered for user plane or networking functions. Must identify which container requires the capability and detail why.", + "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 61098, + "endTime": "2023-09-04 09:18:50.568101953 -0500 CDT m=+20.103026127", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.568040854 -0500 CDT m=+20.102965029", + "state": "passed", + "testID": { + "id": "access-control-net-raw-capability-check", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-no-1337-uid": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using securityContext RunAsUser 1337\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using securityContext RunAsUser 1337\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using securityContext RunAsUser 1337\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using securityContext RunAsUser 1337\",\"certsuite\",\"test-765d6b8dcf-s768n\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using securityContext RunAsUser 1337\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "No Doc Link - Extended", - "description": "Checks that all pods are not using the securityContext UID 1337", - "exceptionProcess": "No exception needed for optional/extended tests.", - "remediation": "Use another process UID that is not 1337." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 171143, - "endTime": "2023-09-04 09:19:06.657094654 -0500 CDT m=+36.192018829", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:19:06.656923511 -0500 CDT m=+36.191847686", - "state": "passed", - "testID": { - "id": "access-control-no-1337-uid", - "suite": "access-control", - "tags": "extended" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using securityContext RunAsUser 1337\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using securityContext RunAsUser 1337\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using securityContext RunAsUser 1337\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using securityContext RunAsUser 1337\",\"certsuite\",\"test-765d6b8dcf-s768n\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not using securityContext RunAsUser 1337\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "No Doc Link - Extended", + "description": "Checks that all pods are not using the securityContext UID 1337", + "exceptionProcess": "No exception needed for optional/extended tests.", + "remediation": "Use another process UID that is not 1337." 
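A minimal sketch of the remediation for access-control-no-1337-uid, assuming a pod-level securityContext; 1000 is an arbitrary example UID, and 1337 is avoided because it is conventionally used by Istio's sidecar proxy:

```yaml
# Illustrative pod for access-control-no-1337-uid.
apiVersion: v1
kind: Pod
metadata:
  name: example-pod            # hypothetical name
  namespace: certsuite
spec:
  securityContext:
    runAsUser: 1000            # any UID other than 1337
  containers:
    - name: app
      image: registry.example.com/app:latest   # placeholder image
```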
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 171143, + "endTime": "2023-09-04 09:19:06.657094654 -0500 CDT m=+36.192018829", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:19:06.656923511 -0500 CDT m=+36.191847686", + "state": "passed", + "testID": { + "id": "access-control-no-1337-uid", + "suite": "access-control", + "tags": "extended" + } + }, "access-control-one-process-per-container": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has only one process running\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has only one process running\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has only one process running\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has only one process running\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has only one process running\",\"certsuite\",\"xdp\",\"xdp-c\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-one-process-per-container", - "description": "Check that all containers under test have only one process running", - "exceptionProcess": "No exception needed for optional/extended tests. Not applicable to SNO applications.", - "remediation": "Launch only one process per container. Should adhere to 1 process per container best practice wherever possible." 
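One common way to satisfy the one-process-per-container rule, offered here as a suggestion rather than the suite's prescribed fix, is to exec the binary directly instead of wrapping it in a shell; paths and names below are assumptions:

```yaml
# Illustrative container spec for access-control-one-process-per-container.
apiVersion: v1
kind: Pod
metadata:
  name: single-process         # hypothetical name
  namespace: certsuite
spec:
  containers:
    - name: app
      image: registry.example.com/app:latest   # placeholder image
      # command: ["/bin/sh", "-c", "/usr/local/bin/app"]  # leaves a shell running too
      command: ["/usr/local/bin/app"]                     # the app is the only process
```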
- }, - "categoryClassification": { - "Extended": "Optional", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 7850352500, - "endTime": "2023-09-04 09:18:58.815593303 -0500 CDT m=+28.350517478", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.965240803 -0500 CDT m=+20.500164978", - "state": "passed", - "testID": { - "id": "access-control-one-process-per-container", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has only one process running\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has only one process running\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has only one process running\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has only one process running\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has only one process running\",\"certsuite\",\"xdp\",\"xdp-c\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-one-process-per-container", + "description": "Check that all containers under test have only one process running", + "exceptionProcess": "No exception needed for optional/extended tests. Not applicable to SNO applications.", + "remediation": "Launch only one process per container. Should adhere to 1 process per container best practice wherever possible." + }, + "categoryClassification": { + "Extended": "Optional", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 7850352500, + "endTime": "2023-09-04 09:18:58.815593303 -0500 CDT m=+28.350517478", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.965240803 -0500 CDT m=+20.500164978", + "state": "passed", + "testID": { + "id": "access-control-one-process-per-container", + "suite": "access-control", + "tags": "common" + } + }, "access-control-pod-automount-service-account-token": { - "capturedTestOutput": "Pod [xdp] has been found with default service account name.\n", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-automount-services-for-pods", - "description": "Check that all pods under test have automountServiceAccountToken set to false. Only pods that require access to the kubernetes API server should have automountServiceAccountToken set to true", - "exceptionProcess": "Exception will be considered if container needs to access APIs which OCP does not offer natively. 
Must document which container requires which API(s) and detail why existing OCP APIs cannot be used.", - "remediation": "Check that pod has automountServiceAccountToken set to false or pod is attached to service account which has automountServiceAccountToken set to false, unless the pod needs access to the kubernetes API server. Pods which do not need API access should set automountServiceAccountToken to false in pod spec." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 300309189, - "endTime": "2023-09-04 09:18:50.964966148 -0500 CDT m=+20.499890322", - "failureLineContent": "\t\t\tginkgo.Fail(\"Pod has been found with default service account name.\")", - "failureLocation": "/home/greyerof/github/certsuite/certsuite/accesscontrol/suite.go:612", - "skipReason": "Pod has been found with default service account name.", - "startTime": "2023-09-04 09:18:50.664656954 -0500 CDT m=+20.199581133", - "state": "failed", - "testID": { - "id": "access-control-pod-automount-service-account-token", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "Pod [xdp] has been found with default service account name.\n", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-automount-services-for-pods", + "description": "Check that all pods under test have automountServiceAccountToken set to false. Only pods that require access to the kubernetes API server should have automountServiceAccountToken set to true", + "exceptionProcess": "Exception will be considered if container needs to access APIs which OCP does not offer natively. Must document which container requires which API(s) and detail why existing OCP APIs cannot be used.", + "remediation": "Check that pod has automountServiceAccountToken set to false or pod is attached to service account which has automountServiceAccountToken set to false, unless the pod needs access to the kubernetes API server. Pods which do not need API access should set automountServiceAccountToken to false in pod spec." 
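Sketching the remediation just quoted against the failing xdp pod: either the pod spec or its (non-default) service account must set automountServiceAccountToken to false. The service account name below is an assumption:

```yaml
# Illustrative fix for access-control-pod-automount-service-account-token.
apiVersion: v1
kind: Pod
metadata:
  name: xdp                            # the failing pod from the results above
  namespace: certsuite
spec:
  serviceAccountName: xdp-sa           # hypothetical non-default service account
  automountServiceAccountToken: false  # this pod does not talk to the API server
  containers:
    - name: xdp-c
      image: registry.example.com/xdp:latest   # placeholder image
```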
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 300309189, + "endTime": "2023-09-04 09:18:50.964966148 -0500 CDT m=+20.499890322", + "failureLineContent": "\t\t\tginkgo.Fail(\"Pod has been found with default service account name.\")", + "failureLocation": "/home/greyerof/github/certsuite/certsuite/accesscontrol/suite.go:612", + "skipReason": "Pod has been found with default service account name.", + "startTime": "2023-09-04 09:18:50.664656954 -0500 CDT m=+20.199581133", + "state": "failed", + "testID": { + "id": "access-control-pod-automount-service-account-token", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-pod-host-ipc": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostIpc is not set to true\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostIpc is not set to true\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostIpc is not set to true\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostIpc is not set to true\",\"certsuite\",\"test-765d6b8dcf-s768n\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostIpc is not set to true\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", - "description": "Verifies that the spec.HostIpc parameter is set to false", - "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", - "remediation": "Set the spec.HostIpc parameter to false in the pod configuration. CNF should avoid accessing host resources - spec.HostIpc should be false." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 30574, - "endTime": "2023-09-04 09:18:50.568931796 -0500 CDT m=+20.103855970", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.56890122 -0500 CDT m=+20.103825396", - "state": "passed", - "testID": { - "id": "access-control-pod-host-ipc", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostIpc is not set to true\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostIpc is not set to true\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostIpc is not set to true\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostIpc is not set to true\",\"certsuite\",\"test-765d6b8dcf-s768n\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostIpc is not set to true\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", + "description": "Verifies that the spec.HostIpc parameter is set to false", + "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", + "remediation": "Set the spec.HostIpc parameter to false in the pod configuration. CNF should avoid accessing host resources - spec.HostIpc should be false." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 30574, + "endTime": "2023-09-04 09:18:50.568931796 -0500 CDT m=+20.103855970", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.56890122 -0500 CDT m=+20.103825396", + "state": "passed", + "testID": { + "id": "access-control-pod-host-ipc", + "suite": "access-control", + "tags": "common" + } + }, "access-control-pod-host-network": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Host network is not set to true\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Host network is not set to true\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Host network is not set to true\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Host network is not set to true\",\"certsuite\",\"test-765d6b8dcf-s768n\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Host network is not set to true\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-the-host-network-namespace", - "description": "Verifies that the spec.HostNetwork parameter is not set (not present)", - "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", - "remediation": "Set the spec.HostNetwork parameter to false in the pod configuration. CNF should avoid accessing host resources - spec.HostNetwork should be false." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 36651, - "endTime": "2023-09-04 09:18:50.568785325 -0500 CDT m=+20.103709500", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.568748674 -0500 CDT m=+20.103672849", - "state": "passed", - "testID": { - "id": "access-control-pod-host-network", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Host network is not set to true\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Host network is not set to true\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Host network is not set to true\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Host network is not set to true\",\"certsuite\",\"test-765d6b8dcf-s768n\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Host network is not set to true\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-the-host-network-namespace", + "description": "Verifies that the spec.HostNetwork parameter is not set (not present)", + "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", + "remediation": "Set the spec.HostNetwork parameter to false in the pod configuration. CNF should avoid accessing host resources - spec.HostNetwork should be false." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 36651, + "endTime": "2023-09-04 09:18:50.568785325 -0500 CDT m=+20.103709500", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.568748674 -0500 CDT m=+20.103672849", + "state": "passed", + "testID": { + "id": "access-control-pod-host-network", + "suite": "access-control", + "tags": "common" + } + }, "access-control-pod-host-path": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Hostpath path is not set\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", - "description": "Verifies that the spec.HostPath parameter is not set (not present)", - "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", - "remediation": "Set the spec.HostPath parameter to false in the pod configuration. CNF should avoid accessing host resources - spec.HostPath should be false." 
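For the host-path check, the usual substitute is an emptyDir volume (or a PersistentVolumeClaim) in place of a hostPath volume; the sketch below uses illustrative names and mount paths:

```yaml
# Illustrative volume layout for access-control-pod-host-path.
apiVersion: v1
kind: Pod
metadata:
  name: example-pod            # hypothetical name
  namespace: certsuite
spec:
  containers:
    - name: app
      image: registry.example.com/app:latest   # placeholder image
      volumeMounts:
        - name: scratch
          mountPath: /var/scratch
  volumes:
    - name: scratch
      emptyDir: {}             # instead of hostPath: {path: /var/scratch}
```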
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 31212, - "endTime": "2023-09-04 09:18:50.56886078 -0500 CDT m=+20.103784955", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.568829568 -0500 CDT m=+20.103753743", - "state": "passed", - "testID": { - "id": "access-control-pod-host-path", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Hostpath path is not set\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", + "description": "Verifies that the spec.HostPath parameter is not set (not present)", + "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", + "remediation": "Set the spec.HostPath parameter to false in the pod configuration. CNF should avoid accessing host resources - spec.HostPath should be false." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 31212, + "endTime": "2023-09-04 09:18:50.56886078 -0500 CDT m=+20.103784955", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.568829568 -0500 CDT m=+20.103753743", + "state": "passed", + "testID": { + "id": "access-control-pod-host-path", + "suite": "access-control", + "tags": "common" + } + }, "access-control-pod-host-pid": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostPid is not set to true\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostPid is not set to true\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostPid is not set to true\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostPid is not set to true\",\"certsuite\",\"test-765d6b8dcf-s768n\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostPid is not set to true\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", - "description": "Verifies that the spec.HostPid parameter is set to false", - "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", - "remediation": "Set the spec.HostPid parameter to false in the pod configuration. CNF should avoid accessing host resources - spec.HostPid should be false." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 39791, - "endTime": "2023-09-04 09:18:50.569008556 -0500 CDT m=+20.103932731", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.568968765 -0500 CDT m=+20.103892940", - "state": "passed", - "testID": { - "id": "access-control-pod-host-pid", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostPid is not set to true\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostPid is not set to true\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostPid is not set to true\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostPid is not set to true\",\"certsuite\",\"test-765d6b8dcf-s768n\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"HostPid is not set to true\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", + "description": "Verifies that the spec.HostPid parameter is set to false", + "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", + "remediation": "Set the spec.HostPid parameter to false in the pod configuration. CNF should avoid accessing host resources - spec.HostPid should be false." 
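The three host-namespace checks above (pod-host-ipc, pod-host-network, pod-host-pid) all reduce to the same pod-spec fields, which already default to false when omitted; an explicitly compliant sketch:

```yaml
# Illustrative pod-spec fields for the host-namespace checks.
apiVersion: v1
kind: Pod
metadata:
  name: example-pod            # hypothetical name
  namespace: certsuite
spec:
  hostIPC: false               # access-control-pod-host-ipc
  hostNetwork: false           # access-control-pod-host-network
  hostPID: false               # access-control-pod-host-pid
  containers:
    - name: app
      image: registry.example.com/app:latest   # placeholder image
```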
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 39791, + "endTime": "2023-09-04 09:18:50.569008556 -0500 CDT m=+20.103932731", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.568968765 -0500 CDT m=+20.103892940", + "state": "passed", + "testID": { + "id": "access-control-pod-host-pid", + "suite": "access-control", + "tags": "common" + } + }, "access-control-pod-role-bindings": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"All the role bindings used by this pod (applied by the service accounts) live in the same namespace\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"All the role bindings used by this pod (applied by the service accounts) live in the same namespace\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"All the role bindings used by this pod (applied by the service accounts) live in the same namespace\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"All the role bindings used by this pod (applied by the service accounts) live in the same namespace\",\"certsuite\",\"test-765d6b8dcf-s768n\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"The serviceAccountName is either empty or default\",\"certsuite\",\"xdp\"]}]}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-security-rbac", - "description": "Ensures that a CNF does not utilize RoleBinding(s) in a non-CNF Namespace.", - "exceptionProcess": "No exceptions", - "remediation": "Ensure the CNF is not configured to use RoleBinding(s) in a non-CNF Namespace. Scope of role must \u003c= scope of creator of role." 
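As a sketch of the role-bindings remediation (and of the default-service-account problem the xdp pod shows in the output above), the service account, Role and RoleBinding all live in the workload's own namespace; every name here is illustrative and the rule scope is a placeholder:

```yaml
# Illustrative namespace-scoped RBAC for access-control-pod-role-bindings.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: xdp-sa                 # hypothetical replacement for the default SA
  namespace: certsuite
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: xdp-role
  namespace: certsuite
rules:
  - apiGroups: [""]
    resources: ["configmaps"]  # example scope, no wider than the creator's own
    verbs: ["get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: xdp-rb
  namespace: certsuite         # same namespace as the pod, not cluster-wide
subjects:
  - kind: ServiceAccount
    name: xdp-sa
    namespace: certsuite
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: xdp-role
```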
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 452989, - "endTime": "2023-09-04 09:18:50.664046132 -0500 CDT m=+20.198970306", - "failureLineContent": "\t\tfail(string(bytes))", - "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:367", - "skipReason": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"All the role bindings used by this pod (applied by the service accounts) live in the same namespace\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"All the role bindings used by this pod (applied by the service accounts) live in the same namespace\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"All the role bindings used by this pod (applied by the service accounts) live in the same namespace\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"All the role bindings used by this pod (applied by the service accounts) live in the same namespace\",\"certsuite\",\"test-765d6b8dcf-s768n\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"The serviceAccountName is either empty or default\",\"certsuite\",\"xdp\"]}]}", - "startTime": "2023-09-04 09:18:50.663593132 -0500 CDT m=+20.198517317", - "state": "failed", - "testID": { - "id": "access-control-pod-role-bindings", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"All the role bindings used by this pod (applied by the service accounts) live in the same namespace\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"All the role bindings used by this pod (applied by the service accounts) live in the same namespace\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"All the role bindings used by this pod (applied by the service accounts) live in the same namespace\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"All the role bindings used by this pod (applied by the service accounts) live in the same namespace\",\"certsuite\",\"test-765d6b8dcf-s768n\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"The serviceAccountName is either empty or default\",\"certsuite\",\"xdp\"]}]}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-security-rbac", + "description": "Ensures that a CNF does not utilize RoleBinding(s) in a non-CNF Namespace.", + "exceptionProcess": "No exceptions", 
+ "remediation": "Ensure the CNF is not configured to use RoleBinding(s) in a non-CNF Namespace. Scope of role must \u003c= scope of creator of role." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 452989, + "endTime": "2023-09-04 09:18:50.664046132 -0500 CDT m=+20.198970306", + "failureLineContent": "\t\tfail(string(bytes))", + "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:367", + "skipReason": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"All the role bindings used by this pod (applied by the service accounts) live in the same namespace\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"All the role bindings used by this pod (applied by the service accounts) live in the same namespace\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"All the role bindings used by this pod (applied by the service accounts) live in the same namespace\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"All the role bindings used by this pod (applied by the service accounts) live in the same namespace\",\"certsuite\",\"test-765d6b8dcf-s768n\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"The serviceAccountName is either empty or default\",\"certsuite\",\"xdp\"]}]}", + "startTime": "2023-09-04 09:18:50.663593132 -0500 CDT m=+20.198517317", + "state": "failed", + "testID": { + "id": "access-control-pod-role-bindings", + "suite": "access-control", + "tags": "common" + } + }, "access-control-pod-service-account": { - "capturedTestOutput": "Pod [xdp certsuite] (ns: %!s(MISSING)) does not have a valid service account name.\n{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod has a service account name\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod has a service account name\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod has a service account name\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod has a service account name\",\"certsuite\",\"test-765d6b8dcf-s768n\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod does not have a valid service account name\",\"certsuite\",\"xdp\"]}]}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-scc-permissions-for-an-application", - "description": "Tests that each CNF Pod utilizes a valid Service Account. 
Default or empty service account is not valid.", - "exceptionProcess": "No exceptions", - "remediation": "Ensure that the each CNF Pod is configured to use a valid Service Account" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 444525, - "endTime": "2023-09-04 09:18:50.663337898 -0500 CDT m=+20.198262075", - "failureLineContent": "\t\tfail(string(bytes))", - "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:367", - "skipReason": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod has a service account name\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod has a service account name\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod has a service account name\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod has a service account name\",\"certsuite\",\"test-765d6b8dcf-s768n\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod does not have a valid service account name\",\"certsuite\",\"xdp\"]}]}", - "startTime": "2023-09-04 09:18:50.662893374 -0500 CDT m=+20.197817550", - "state": "failed", - "testID": { - "id": "access-control-pod-service-account", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "Pod [xdp certsuite] (ns: %!s(MISSING)) does not have a valid service account name.\n{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod has a service account name\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod has a service account name\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod has a service account name\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod has a service account name\",\"certsuite\",\"test-765d6b8dcf-s768n\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod does not have a valid service account name\",\"certsuite\",\"xdp\"]}]}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-scc-permissions-for-an-application", + "description": "Tests that each CNF Pod utilizes a valid Service Account. 
Default or empty service account is not valid.", + "exceptionProcess": "No exceptions", + "remediation": "Ensure that the each CNF Pod is configured to use a valid Service Account" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 444525, + "endTime": "2023-09-04 09:18:50.663337898 -0500 CDT m=+20.198262075", + "failureLineContent": "\t\tfail(string(bytes))", + "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:367", + "skipReason": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod has a service account name\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod has a service account name\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod has a service account name\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod has a service account name\",\"certsuite\",\"test-765d6b8dcf-s768n\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod does not have a valid service account name\",\"certsuite\",\"xdp\"]}]}", + "startTime": "2023-09-04 09:18:50.662893374 -0500 CDT m=+20.197817550", + "state": "failed", + "testID": { + "id": "access-control-pod-service-account", + "suite": "access-control", + "tags": "common" + } + }, "access-control-projected-volume-service-account-token": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"the pod is not using a projected volume for service account access\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"the pod is not using a projected volume for service account access\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"the pod is not using a projected volume for service account access\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"the pod is not using a projected volume for service account access\",\"certsuite\",\"test-765d6b8dcf-s768n\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"ProjectedVolume\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Projected Volume Name\",\"Projected Volume SA Token\",\"Projected Volume SA Token\",\"Projected Volume SA Token\",\"Projected Volume SA Token\"],\"ObjectFieldsValues\":[\"the projected volume Service account token field is not nil\",\"certsuite\",\"xdp\",\"kube-api-access-t8lpx\",\"\\u0026ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,}\",\"nil\",\"nil\",\"nil\"]}]}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": 
"https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-automount-services-for-pods", - "description": "Checks that pods do not use projected volumes and service account tokens", - "exceptionProcess": "Exception will be considered if container needs to access APIs which OCP does not offer natively. Must document which container requires which API(s) and detail why existing OCP APIs cannot be used.", - "remediation": "Ensure that pods do not use projected volumes and service account tokens" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 298494, - "endTime": "2023-09-04 09:19:06.65744981 -0500 CDT m=+36.192374005", - "failureLineContent": "\t\tfail(string(bytes))", - "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:367", - "skipReason": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"the pod is not using a projected volume for service account access\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"the pod is not using a projected volume for service account access\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"the pod is not using a projected volume for service account access\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"the pod is not using a projected volume for service account access\",\"certsuite\",\"test-765d6b8dcf-s768n\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"ProjectedVolume\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Projected Volume Name\",\"Projected Volume SA Token\",\"Projected Volume SA Token\",\"Projected Volume SA Token\",\"Projected Volume SA Token\"],\"ObjectFieldsValues\":[\"the projected volume Service account token field is not nil\",\"certsuite\",\"xdp\",\"kube-api-access-t8lpx\",\"\\u0026ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,}\",\"nil\",\"nil\",\"nil\"]}]}", - "startTime": "2023-09-04 09:19:06.657151337 -0500 CDT m=+36.192075511", - "state": "failed", - "testID": { - "id": "access-control-projected-volume-service-account-token", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"the pod is not using a projected volume for service account access\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"the pod is not using a projected volume for service account access\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"the pod is not using a projected volume for service account access\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"the pod is not using a projected volume for service account 
access\",\"certsuite\",\"test-765d6b8dcf-s768n\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"ProjectedVolume\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Projected Volume Name\",\"Projected Volume SA Token\",\"Projected Volume SA Token\",\"Projected Volume SA Token\",\"Projected Volume SA Token\"],\"ObjectFieldsValues\":[\"the projected volume Service account token field is not nil\",\"certsuite\",\"xdp\",\"kube-api-access-t8lpx\",\"\\u0026ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,}\",\"nil\",\"nil\",\"nil\"]}]}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-automount-services-for-pods", + "description": "Checks that pods do not use projected volumes and service account tokens", + "exceptionProcess": "Exception will be considered if container needs to access APIs which OCP does not offer natively. Must document which container requires which API(s) and detail why existing OCP APIs cannot be used.", + "remediation": "Ensure that pods do not use projected volumes and service account tokens" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 298494, + "endTime": "2023-09-04 09:19:06.65744981 -0500 CDT m=+36.192374005", + "failureLineContent": "\t\tfail(string(bytes))", + "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:367", + "skipReason": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"the pod is not using a projected volume for service account access\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"the pod is not using a projected volume for service account access\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"the pod is not using a projected volume for service account access\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"the pod is not using a projected volume for service account access\",\"certsuite\",\"test-765d6b8dcf-s768n\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"ProjectedVolume\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Projected Volume Name\",\"Projected Volume SA Token\",\"Projected Volume SA Token\",\"Projected Volume SA Token\",\"Projected Volume SA Token\"],\"ObjectFieldsValues\":[\"the projected volume Service account token field is not nil\",\"certsuite\",\"xdp\",\"kube-api-access-t8lpx\",\"\\u0026ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,}\",\"nil\",\"nil\",\"nil\"]}]}", + "startTime": "2023-09-04 09:19:06.657151337 -0500 CDT m=+36.192075511", + "state": "failed", + "testID": { + "id": "access-control-projected-volume-service-account-token", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-requests-and-limits": { - "capturedTestOutput": "Container has been found missing resource limits: [container: xdp-c pod: xdp ns: certsuite]\nContainer has been found missing resource requests: [container: xdp-c pod: xdp ns: 
certsuite]\n{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has resource requests and limits\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has resource requests and limits\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has resource requests and limits\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has resource requests and limits\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is missing resource requests or limits\",\"certsuite\",\"xdp\",\"xdp-c\"]}]}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requests/limits", - "description": "Check that containers have resource requests and limits specified in their spec.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Add requests and limits to your container spec. See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 302278, - "endTime": "2023-09-04 09:19:06.656636052 -0500 CDT m=+36.191560227", - "failureLineContent": "\t\tfail(string(bytes))", - "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:367", - "skipReason": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has resource requests and limits\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has resource requests and limits\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has resource requests and limits\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has resource requests and limits\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is missing resource requests or limits\",\"certsuite\",\"xdp\",\"xdp-c\"]}]}", - "startTime": "2023-09-04 09:19:06.656333774 -0500 CDT m=+36.191257949", - "state": "failed", - "testID": { - "id": 
"access-control-requests-and-limits", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "Container has been found missing resource limits: [container: xdp-c pod: xdp ns: certsuite]\nContainer has been found missing resource requests: [container: xdp-c pod: xdp ns: certsuite]\n{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has resource requests and limits\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has resource requests and limits\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has resource requests and limits\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has resource requests and limits\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is missing resource requests or limits\",\"certsuite\",\"xdp\",\"xdp-c\"]}]}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requests/limits", + "description": "Check that containers have resource requests and limits specified in their spec.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Add requests and limits to your container spec. 
See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 302278, + "endTime": "2023-09-04 09:19:06.656636052 -0500 CDT m=+36.191560227", + "failureLineContent": "\t\tfail(string(bytes))", + "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:367", + "skipReason": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has resource requests and limits\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has resource requests and limits\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has resource requests and limits\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container has resource requests and limits\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is missing resource requests or limits\",\"certsuite\",\"xdp\",\"xdp-c\"]}]}", + "startTime": "2023-09-04 09:19:06.656333774 -0500 CDT m=+36.191257949", + "state": "failed", + "testID": { + "id": "access-control-requests-and-limits", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-security-context": { - "capturedTestOutput": "containerSCC [container: test pod: test-0 ns: certsuite {false false false false false true false true false true true true CategoryID1(limited access granted automatically) true true}] is %!v(MISSING)\nTesting if pod belongs to category [CategoryID1(limited access granted automatically)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [true] - OK\nRunAsNonRoot = [true false] but expected %!s(MISSING) - NOK\nFsGroupPresent = [true] - OK\nDropCapabilities list - OK\n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory list is as expected [CategoryID1(limited access granted automatically)] - OK\nTesting if pod belongs to category [CategoryID1NoUID0(automatically granted, basic rights with mesh networks)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [true] - OK\nRunAsNonRoot = [true] - OK\nFsGroupPresent = [true] - OK\nDropCapabilities list - OK\n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface 
{}=[])CapabilitiesCategory list is as expected [CategoryID1(limited access granted automatically)] - OK\nTesting if pod belongs to category1NoUID0 \n%!(EXTRA []interface {}=[])containerSCC [container: test pod: test-1 ns: certsuite {false false false false false true false true false true true true CategoryID1(limited access granted automatically) true true}] is %!v(MISSING)\nTesting if pod belongs to category [CategoryID1(limited access granted automatically)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [true] - OK\nRunAsNonRoot = [true false] but expected %!s(MISSING) - NOK\nFsGroupPresent = [true] - OK\nDropCapabilities list - OK\n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory list is as expected [CategoryID1(limited access granted automatically)] - OK\nTesting if pod belongs to category [CategoryID1NoUID0(automatically granted, basic rights with mesh networks)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [true] - OK\nRunAsNonRoot = [true] - OK\nFsGroupPresent = [true] - OK\nDropCapabilities list - OK\n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory list is as expected [CategoryID1(limited access granted automatically)] - OK\nTesting if pod belongs to category1NoUID0 \n%!(EXTRA []interface {}=[])containerSCC [container: test pod: test-765d6b8dcf-gbvsd ns: certsuite {false false false false false false false false false false false true CategoryID2(advanced networking (vlan tag, dscp, priority)) false true}] is %!v(MISSING)\nTesting if pod belongs to category [CategoryID1(limited access granted automatically)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory = [CategoryID2(advanced networking (vlan tag, dscp, priority)) CategoryID1(limited access granted automatically)] but expected %!s(MISSING) - NOK\nTesting if pod belongs to category [CategoryID1NoUID0(automatically granted, basic rights with mesh networks)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface 
{}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory = [CategoryID2(advanced networking (vlan tag, dscp, priority)) CategoryID1(limited access granted automatically)] but expected %!s(MISSING) - NOK\nTesting if pod belongs to category [CategoryID2(advanced networking (vlan tag, dscp, priority))]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory list is as expected [CategoryID2(advanced networking (vlan tag, dscp, priority))] - OK\nTesting if pod belongs to category [CategoryID3(SRIOV and DPDK)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory = [CategoryID2(advanced networking (vlan tag, dscp, priority)) CategoryID3(SRIOV and DPDK)] but expected %!s(MISSING) - NOK\ncontainerSCC [container: test pod: test-765d6b8dcf-s768n ns: certsuite {false false false false false false false false false false false true CategoryID2(advanced networking (vlan tag, dscp, priority)) false true}] is %!v(MISSING)\nTesting if pod belongs to category [CategoryID1(limited access granted automatically)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory = [CategoryID2(advanced networking (vlan tag, dscp, priority)) CategoryID1(limited access granted automatically)] but expected %!s(MISSING) - NOK\nTesting if pod belongs to category [CategoryID1NoUID0(automatically granted, basic 
rights with mesh networks)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory = [CategoryID2(advanced networking (vlan tag, dscp, priority)) CategoryID1(limited access granted automatically)] but expected %!s(MISSING) - NOK\nTesting if pod belongs to category [CategoryID2(advanced networking (vlan tag, dscp, priority))]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory list is as expected [CategoryID2(advanced networking (vlan tag, dscp, priority))] - OK\nTesting if pod belongs to category [CategoryID3(SRIOV and DPDK)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory = [CategoryID2(advanced networking (vlan tag, dscp, priority)) CategoryID3(SRIOV and DPDK)] but expected %!s(MISSING) - NOK\ncontainerSCC [container: xdp-c pod: xdp ns: certsuite {false false false false false false false false false false false false CategoryID4(anything not matching lower category) false true}] is %!v(MISSING)\nTesting if pod belongs to category [CategoryID1(limited access granted automatically)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer 
= [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent = [false true] but expected %!s(MISSING) expected to be non nil - NOK\nCapabilitiesCategory = [CategoryID4(anything not matching lower category) CategoryID1(limited access granted automatically)] but expected %!s(MISSING) - NOK\nTesting if pod belongs to category [CategoryID1NoUID0(automatically granted, basic rights with mesh networks)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent = [false true] but expected %!s(MISSING) expected to be non nil - NOK\nCapabilitiesCategory = [CategoryID4(anything not matching lower category) CategoryID1(limited access granted automatically)] but expected %!s(MISSING) - NOK\nTesting if pod belongs to category [CategoryID2(advanced networking (vlan tag, dscp, priority))]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent = [false true] but expected %!s(MISSING) expected to be non nil - NOK\nCapabilitiesCategory = [CategoryID4(anything not matching lower category) CategoryID2(advanced networking (vlan tag, dscp, priority))] but expected %!s(MISSING) - NOK\nTesting if pod belongs to category [CategoryID3(SRIOV and DPDK)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent = [false true] but expected %!s(MISSING) expected to be non nil - NOK\nCapabilitiesCategory = [CategoryID4(anything not matching lower category) CategoryID3(SRIOV and DPDK)] but expected %!s(MISSING) - NOK\n{\"CompliantObjectsOut\":[{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is category 1 or category NoUID0\",\"certsuite\",\"test-0\",\"test\",\"CategoryID1NoUID0(automatically granted, basic rights with mesh 
networks)\"]},{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is category 1 or category NoUID0\",\"certsuite\",\"test-1\",\"test\",\"CategoryID1NoUID0(automatically granted, basic rights with mesh networks)\"]},{\"ObjectType\":\"Cnf\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Category\"],\"ObjectFieldsValues\":[\"Overall CNF category\",\"CategoryID4(anything not matching lower category)\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is NOT category 1 or category NoUID0\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\",\"CategoryID4(anything not matching lower category)\"]},{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is NOT category 1 or category NoUID0\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\",\"CategoryID4(anything not matching lower category)\"]},{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is NOT category 1 or category NoUID0\",\"certsuite\",\"xdp\",\"xdp-c\",\"CategoryID4(anything not matching lower category)\"]}]}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", - "description": "Checks the security context matches one of the 4 categories", - "exceptionProcess": "no exception needed for optional/extended test", - "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and document why. If the container had the right configuration of the allowed category from the 4 approved list then the test will pass. 
The 4 categories are defined in Requirement ID 94118 of the Extended Best Practices guide (private repo)" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 1071152, - "endTime": "2023-09-04 09:18:50.567381737 -0500 CDT m=+20.102305911", - "failureLineContent": "\t\tfail(string(bytes))", - "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:367", - "skipReason": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is category 1 or category NoUID0\",\"certsuite\",\"test-0\",\"test\",\"CategoryID1NoUID0(automatically granted, basic rights with mesh networks)\"]},{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is category 1 or category NoUID0\",\"certsuite\",\"test-1\",\"test\",\"CategoryID1NoUID0(automatically granted, basic rights with mesh networks)\"]},{\"ObjectType\":\"Cnf\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Category\"],\"ObjectFieldsValues\":[\"Overall CNF category\",\"CategoryID4(anything not matching lower category)\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is NOT category 1 or category NoUID0\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\",\"CategoryID4(anything not matching lower category)\"]},{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is NOT category 1 or category NoUID0\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\",\"CategoryID4(anything not matching lower category)\"]},{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is NOT category 1 or category NoUID0\",\"certsuite\",\"xdp\",\"xdp-c\",\"CategoryID4(anything not matching lower category)\"]}]}", - "startTime": "2023-09-04 09:18:50.566310584 -0500 CDT m=+20.101234759", - "state": "failed", - "testID": { - "id": "access-control-security-context", - "suite": "access-control", - "tags": "extended" - } + "capturedTestOutput": "containerSCC [container: test pod: test-0 ns: certsuite {false false false false false true false true false true true true CategoryID1(limited access granted automatically) true true}] is %!v(MISSING)\nTesting if pod belongs to category [CategoryID1(limited access granted automatically)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [true] - OK\nRunAsNonRoot = [true false] but expected %!s(MISSING) - NOK\nFsGroupPresent = [true] - OK\nDropCapabilities list - OK\n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory list is as expected [CategoryID1(limited access granted 
automatically)] - OK\nTesting if pod belongs to category [CategoryID1NoUID0(automatically granted, basic rights with mesh networks)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [true] - OK\nRunAsNonRoot = [true] - OK\nFsGroupPresent = [true] - OK\nDropCapabilities list - OK\n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory list is as expected [CategoryID1(limited access granted automatically)] - OK\nTesting if pod belongs to category1NoUID0 \n%!(EXTRA []interface {}=[])containerSCC [container: test pod: test-1 ns: certsuite {false false false false false true false true false true true true CategoryID1(limited access granted automatically) true true}] is %!v(MISSING)\nTesting if pod belongs to category [CategoryID1(limited access granted automatically)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [true] - OK\nRunAsNonRoot = [true false] but expected %!s(MISSING) - NOK\nFsGroupPresent = [true] - OK\nDropCapabilities list - OK\n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory list is as expected [CategoryID1(limited access granted automatically)] - OK\nTesting if pod belongs to category [CategoryID1NoUID0(automatically granted, basic rights with mesh networks)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [true] - OK\nRunAsNonRoot = [true] - OK\nFsGroupPresent = [true] - OK\nDropCapabilities list - OK\n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory list is as expected [CategoryID1(limited access granted automatically)] - OK\nTesting if pod belongs to category1NoUID0 \n%!(EXTRA []interface {}=[])containerSCC [container: test pod: test-765d6b8dcf-gbvsd ns: certsuite {false false false false false false false false false false false true CategoryID2(advanced networking (vlan tag, dscp, priority)) false true}] is %!v(MISSING)\nTesting if pod belongs to category [CategoryID1(limited access granted automatically)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory = [CategoryID2(advanced networking 
(vlan tag, dscp, priority)) CategoryID1(limited access granted automatically)] but expected %!s(MISSING) - NOK\nTesting if pod belongs to category [CategoryID1NoUID0(automatically granted, basic rights with mesh networks)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory = [CategoryID2(advanced networking (vlan tag, dscp, priority)) CategoryID1(limited access granted automatically)] but expected %!s(MISSING) - NOK\nTesting if pod belongs to category [CategoryID2(advanced networking (vlan tag, dscp, priority))]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory list is as expected [CategoryID2(advanced networking (vlan tag, dscp, priority))] - OK\nTesting if pod belongs to category [CategoryID3(SRIOV and DPDK)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory = [CategoryID2(advanced networking (vlan tag, dscp, priority)) CategoryID3(SRIOV and DPDK)] but expected %!s(MISSING) - NOK\ncontainerSCC [container: test pod: test-765d6b8dcf-s768n ns: certsuite {false false false false false false false false false false false true CategoryID2(advanced networking (vlan tag, dscp, priority)) false true}] is %!v(MISSING)\nTesting if pod belongs to category [CategoryID1(limited access granted automatically)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value 
\n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory = [CategoryID2(advanced networking (vlan tag, dscp, priority)) CategoryID1(limited access granted automatically)] but expected %!s(MISSING) - NOK\nTesting if pod belongs to category [CategoryID1NoUID0(automatically granted, basic rights with mesh networks)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory = [CategoryID2(advanced networking (vlan tag, dscp, priority)) CategoryID1(limited access granted automatically)] but expected %!s(MISSING) - NOK\nTesting if pod belongs to category [CategoryID2(advanced networking (vlan tag, dscp, priority))]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory list is as expected [CategoryID2(advanced networking (vlan tag, dscp, priority))] - OK\nTesting if pod belongs to category [CategoryID3(SRIOV and DPDK)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent is not nil - OK\n%!(EXTRA []interface {}=[])CapabilitiesCategory = [CategoryID2(advanced networking (vlan tag, dscp, priority)) CategoryID3(SRIOV and DPDK)] but expected %!s(MISSING) - NOK\ncontainerSCC [container: xdp-c pod: xdp ns: certsuite {false false false false false false false false false false false false CategoryID4(anything not matching lower category) false true}] is %!v(MISSING)\nTesting if pod belongs to category [CategoryID1(limited access 
granted automatically)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent = [false true] but expected %!s(MISSING) expected to be non nil - NOK\nCapabilitiesCategory = [CategoryID4(anything not matching lower category) CategoryID1(limited access granted automatically)] but expected %!s(MISSING) - NOK\nTesting if pod belongs to category [CategoryID1NoUID0(automatically granted, basic rights with mesh networks)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent = [false true] but expected %!s(MISSING) expected to be non nil - NOK\nCapabilitiesCategory = [CategoryID4(anything not matching lower category) CategoryID1(limited access granted automatically)] but expected %!s(MISSING) - NOK\nTesting if pod belongs to category [CategoryID2(advanced networking (vlan tag, dscp, priority))]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = [false] - OK\nSeLinuxContextPresent = [false true] but expected %!s(MISSING) expected to be non nil - NOK\nCapabilitiesCategory = [CategoryID4(anything not matching lower category) CategoryID2(advanced networking (vlan tag, dscp, priority))] but expected %!s(MISSING) - NOK\nTesting if pod belongs to category [CategoryID3(SRIOV and DPDK)]\nAllVolumeAllowed = [true] - OK\nRunAsUserPresent = [false true] but expected %!s(MISSING) - NOK\nRunAsNonRoot = [false] - OK\nFsGroupPresent = [false true] but expected %!s(MISSING) - NOK\nRequiredDropCapabilitiesPresent = [false true] but expected %!s(MISSING) - NOK\nits didnt have all the required (MKNOD, SETUID, SETGID, KILL)/(ALL) drop value \n%!(EXTRA []interface {}=[])HostDirVolumePluginPresent = [false] - OK\nHostIPC = [false] - OK\nHostNetwork = [false] - OK\nHostPID = [false] - OK\nHostPorts = [false] - OK\nHostNetwork = [false] - OK\nPrivilegedContainer = [false] - OK\nReadOnlyRootFilesystem = 
[false] - OK\nSeLinuxContextPresent = [false true] but expected %!s(MISSING) expected to be non nil - NOK\nCapabilitiesCategory = [CategoryID4(anything not matching lower category) CategoryID3(SRIOV and DPDK)] but expected %!s(MISSING) - NOK\n{\"CompliantObjectsOut\":[{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is category 1 or category NoUID0\",\"certsuite\",\"test-0\",\"test\",\"CategoryID1NoUID0(automatically granted, basic rights with mesh networks)\"]},{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is category 1 or category NoUID0\",\"certsuite\",\"test-1\",\"test\",\"CategoryID1NoUID0(automatically granted, basic rights with mesh networks)\"]},{\"ObjectType\":\"Cnf\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Category\"],\"ObjectFieldsValues\":[\"Overall CNF category\",\"CategoryID4(anything not matching lower category)\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is NOT category 1 or category NoUID0\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\",\"CategoryID4(anything not matching lower category)\"]},{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is NOT category 1 or category NoUID0\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\",\"CategoryID4(anything not matching lower category)\"]},{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is NOT category 1 or category NoUID0\",\"certsuite\",\"xdp\",\"xdp-c\",\"CategoryID4(anything not matching lower category)\"]}]}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", + "description": "Checks the security context matches one of the 4 categories", + "exceptionProcess": "no exception needed for optional/extended test", + "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and document why. If the container had the right configuration of the allowed category from the 4 approved list then the test will pass. 
The 4 categories are defined in Requirement ID 94118 of the Extended Best Practices guide (private repo)" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 1071152, + "endTime": "2023-09-04 09:18:50.567381737 -0500 CDT m=+20.102305911", + "failureLineContent": "\t\tfail(string(bytes))", + "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:367", + "skipReason": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is category 1 or category NoUID0\",\"certsuite\",\"test-0\",\"test\",\"CategoryID1NoUID0(automatically granted, basic rights with mesh networks)\"]},{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is category 1 or category NoUID0\",\"certsuite\",\"test-1\",\"test\",\"CategoryID1NoUID0(automatically granted, basic rights with mesh networks)\"]},{\"ObjectType\":\"Cnf\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Category\"],\"ObjectFieldsValues\":[\"Overall CNF category\",\"CategoryID4(anything not matching lower category)\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is NOT category 1 or category NoUID0\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\",\"CategoryID4(anything not matching lower category)\"]},{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is NOT category 1 or category NoUID0\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\",\"CategoryID4(anything not matching lower category)\"]},{\"ObjectType\":\"ContainerCategory\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\",\"Category\"],\"ObjectFieldsValues\":[\"container category is NOT category 1 or category NoUID0\",\"certsuite\",\"xdp\",\"xdp-c\",\"CategoryID4(anything not matching lower category)\"]}]}", + "startTime": "2023-09-04 09:18:50.566310584 -0500 CDT m=+20.101234759", + "state": "failed", + "testID": { + "id": "access-control-security-context", + "suite": "access-control", + "tags": "extended" + } + }, "access-control-security-context-non-root-user-check": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Root User not detected (RunAsUser uid=0)\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Root User not detected (RunAsUser uid=0)\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Root User not detected (RunAsUser uid=0)\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Root User not detected 
(RunAsUser uid=0)\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Root User not detected (RunAsUser uid=0)\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Root User not detected (RunAsUser uid=0)\",\"certsuite\",\"test-765d6b8dcf-s768n\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Root User not detected (RunAsUser uid=0)\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", - "description": "Checks the security context runAsUser parameter in pods and containers to make sure it is not set to uid root(0). Pods and containers should not run as root (runAsUser is not set to uid0).", - "exceptionProcess": "No exceptions - will only be considered under special circumstances. Must identify which container needs access and document why with details.", - "remediation": "Change the pod and containers \"runAsUser\" uid to something other than root(0)" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 38277, - "endTime": "2023-09-04 09:18:50.568537035 -0500 CDT m=+20.103461209", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.568498757 -0500 CDT m=+20.103422932", - "state": "passed", - "testID": { - "id": "access-control-security-context-non-root-user-check", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Root User not detected (RunAsUser uid=0)\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Root User not detected (RunAsUser uid=0)\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Root User not detected (RunAsUser uid=0)\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Root User not detected (RunAsUser uid=0)\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Root User not detected (RunAsUser uid=0)\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Root User not detected (RunAsUser uid=0)\",\"certsuite\",\"test-765d6b8dcf-s768n\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Root User not detected (RunAsUser uid=0)\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": 
"https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", + "description": "Checks the security context runAsUser parameter in pods and containers to make sure it is not set to uid root(0). Pods and containers should not run as root (runAsUser is not set to uid0).", + "exceptionProcess": "No exceptions - will only be considered under special circumstances. Must identify which container needs access and document why with details.", + "remediation": "Change the pod and containers \"runAsUser\" uid to something other than root(0)" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 38277, + "endTime": "2023-09-04 09:18:50.568537035 -0500 CDT m=+20.103461209", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.568498757 -0500 CDT m=+20.103422932", + "state": "passed", + "testID": { + "id": "access-control-security-context-non-root-user-check", + "suite": "access-control", + "tags": "common" + } + }, "access-control-security-context-privilege-escalation": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"AllowPrivilegeEscalation is set to false\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"AllowPrivilegeEscalation is set to false\",\"certsuite\",\"test-1\",\"test\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", - "description": "Checks if privileged escalation is enabled (AllowPrivilegeEscalation=true).", - "exceptionProcess": "No exceptions", - "remediation": "Configure privilege escalation to false. Privileged escalation should not be allowed (AllowPrivilegeEscalation=false)." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 26433, - "endTime": "2023-09-04 09:18:50.56861147 -0500 CDT m=+20.103535645", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.568585037 -0500 CDT m=+20.103509212", - "state": "passed", - "testID": { - "id": "access-control-security-context-privilege-escalation", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"AllowPrivilegeEscalation is set to false\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"AllowPrivilegeEscalation is set to false\",\"certsuite\",\"test-1\",\"test\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", + "description": "Checks if privileged escalation is enabled (AllowPrivilegeEscalation=true).", + "exceptionProcess": "No exceptions", + "remediation": "Configure privilege escalation to false. Privileged escalation should not be allowed (AllowPrivilegeEscalation=false)." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 26433, + "endTime": "2023-09-04 09:18:50.56861147 -0500 CDT m=+20.103535645", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.568585037 -0500 CDT m=+20.103509212", + "state": "passed", + "testID": { + "id": "access-control-security-context-privilege-escalation", + "suite": "access-control", + "tags": "common" + } + }, "access-control-service-type": { - "capturedTestOutput": "{\"CompliantObjectsOut\":null,\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-the-host-network-namespace", - "description": "Tests that each CNF Service does not utilize NodePort(s).", - "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", - "remediation": "Ensure Services are not configured to use NodePort(s).CNF should avoid accessing host resources - tests that each CNF Service does not utilize NodePort(s)." 
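`access-control-service-type` only fails when a Service requests `NodePort`; the compliant shape is a `ClusterIP` Service, with external exposure handled by an Ingress/Route or LoadBalancer instead. A sketch on the same upstream types; the Service name, selector, and ports are invented (the `certsuite` namespace comes from the run above):

```go
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// ClusterIP keeps the Service off the node's host port range;
	// external exposure belongs to an Ingress/Route or LoadBalancer.
	svc := corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "certsuite"},
		Spec: corev1.ServiceSpec{
			Type:     corev1.ServiceTypeClusterIP,
			Selector: map[string]string{"app": "test"},
			Ports: []corev1.ServicePort{{
				Port:       80,
				TargetPort: intstr.FromInt(8080),
			}},
		},
	}
	out, _ := json.MarshalIndent(svc, "", "  ")
	fmt.Println(string(out))
}
```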
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 64098, - "endTime": "2023-09-04 09:19:06.657747422 -0500 CDT m=+36.192671612", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:19:06.657683301 -0500 CDT m=+36.192607514", - "state": "passed", - "testID": { - "id": "access-control-service-type", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":null,\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-the-host-network-namespace", + "description": "Tests that each CNF Service does not utilize NodePort(s).", + "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", + "remediation": "Ensure Services are not configured to use NodePort(s).CNF should avoid accessing host resources - tests that each CNF Service does not utilize NodePort(s)." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 64098, + "endTime": "2023-09-04 09:19:06.657747422 -0500 CDT m=+36.192671612", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:19:06.657683301 -0500 CDT m=+36.192607514", + "state": "passed", + "testID": { + "id": "access-control-service-type", + "suite": "access-control", + "tags": "common" + } + }, "access-control-ssh-daemons": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not running an SSH daemon\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not running an SSH daemon\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not running an SSH daemon\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not running an SSH daemon\",\"certsuite\",\"test-765d6b8dcf-s768n\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not running an SSH daemon\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-interaction/configuration", - "description": "Check that pods do not run SSH daemons.", - "exceptionProcess": "No exceptions - special consideration can be given to certain containers which run as utility tool daemon", - "remediation": "Ensure that no SSH daemons are running inside a pod. Pods should not run as SSH Daemons (replicaset or statefulset only)." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 7839353399, - "endTime": "2023-09-04 09:19:06.656166437 -0500 CDT m=+36.191090610", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:58.816813038 -0500 CDT m=+28.351737211", - "state": "passed", - "testID": { - "id": "access-control-ssh-daemons", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not running an SSH daemon\",\"certsuite\",\"test-0\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not running an SSH daemon\",\"certsuite\",\"test-1\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not running an SSH daemon\",\"certsuite\",\"test-765d6b8dcf-gbvsd\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not running an SSH daemon\",\"certsuite\",\"test-765d6b8dcf-s768n\"]},{\"ObjectType\":\"Pod\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\"],\"ObjectFieldsValues\":[\"Pod is not running an SSH daemon\",\"certsuite\",\"xdp\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-interaction/configuration", + "description": "Check that pods do not run SSH daemons.", + "exceptionProcess": "No exceptions - special consideration can be given to certain containers which run as utility tool daemon", + "remediation": "Ensure that no SSH daemons are running inside a pod. Pods should not run as SSH Daemons (replicaset or statefulset only)." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 7839353399, + "endTime": "2023-09-04 09:19:06.656166437 -0500 CDT m=+36.191090610", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:58.816813038 -0500 CDT m=+28.351737211", + "state": "passed", + "testID": { + "id": "access-control-ssh-daemons", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-sys-admin-capability-check": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"xdp\",\"xdp-c\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-sys_admin", - "description": "Ensures that containers do not use SYS_ADMIN capability", - "exceptionProcess": "No exceptions", - "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why. Containers should not use the SYS_ADMIN Linux capability." 
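The `access-control-sys-admin-capability-check` above passes because no container explicitly adds a forbidden capability. The following is a simplified sketch of that kind of scan; `hasForbiddenCapability` is a made-up helper illustrating the idea, not the certsuite implementation:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// hasForbiddenCapability sketches the scan a check like
// access-control-sys-admin-capability-check performs: look through the
// capabilities a container explicitly adds.
func hasForbiddenCapability(c corev1.Container, forbidden corev1.Capability) bool {
	if c.SecurityContext == nil || c.SecurityContext.Capabilities == nil {
		return false // nothing explicitly added
	}
	for _, added := range c.SecurityContext.Capabilities.Add {
		if added == forbidden {
			return true
		}
	}
	return false
}

func main() {
	bad := corev1.Container{
		Name: "demo", // invented
		SecurityContext: &corev1.SecurityContext{
			Capabilities: &corev1.Capabilities{Add: []corev1.Capability{"SYS_ADMIN"}},
		},
	}
	fmt.Println(hasForbiddenCapability(bad, "SYS_ADMIN")) // prints "true": non-compliant
}
```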
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 108954, - "endTime": "2023-09-04 09:18:50.567648143 -0500 CDT m=+20.102572317", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.567539188 -0500 CDT m=+20.102463363", - "state": "passed", - "testID": { - "id": "access-control-sys-admin-capability-check", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"No forbidden capabilities detected in container\",\"certsuite\",\"xdp\",\"xdp-c\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-sys_admin", + "description": "Ensures that containers do not use SYS_ADMIN capability", + "exceptionProcess": "No exceptions", + "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why. Containers should not use the SYS_ADMIN Linux capability." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 108954, + "endTime": "2023-09-04 09:18:50.567648143 -0500 CDT m=+20.102572317", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.567539188 -0500 CDT m=+20.102463363", + "state": "passed", + "testID": { + "id": "access-control-sys-admin-capability-check", + "suite": "access-control", + "tags": "common" + } + }, "access-control-sys-nice-realtime-capability": { - "capturedTestOutput": "[container: test pod: test-0 ns: certsuite] has been found running on a realtime kernel enabled node without SYS_NICE capability.\n[container: test pod: test-1 ns: certsuite] has been found running on a realtime kernel enabled node without SYS_NICE capability.\n[container: test pod: test-765d6b8dcf-gbvsd ns: certsuite] has been found running on a realtime kernel enabled node without SYS_NICE capability.\n[container: test pod: test-765d6b8dcf-s768n ns: certsuite] has been found running on a realtime kernel enabled node without SYS_NICE capability.\n[container: xdp-c pod: xdp ns: certsuite] has been found running on a realtime kernel enabled node without SYS_NICE capability.\n{\"CompliantObjectsOut\":null,\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"xdp\",\"xdp-c\"]}]}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-sys_nice", - "description": "Check that pods running on nodes with realtime kernel enabled have the SYS_NICE capability enabled in their spec. In the case that a CNF is running on a node using the real-time kernel, SYS_NICE will be used to allow DPDK application to switch to SCHED_FIFO.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "If pods are scheduled to realtime kernel nodes, they must add SYS_NICE capability to their spec." 
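For the failing `access-control-sys-nice-realtime-capability` above, the remediation translates to a single extra entry in the capability add list. A sketch, with the rest of the capability set kept at drop-ALL (upstream `k8s.io/api` types assumed):

```go
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// On a node running the realtime kernel, SYS_NICE lets a DPDK-style
	// workload move its threads to SCHED_FIFO; everything else stays dropped.
	sc := &corev1.SecurityContext{
		Capabilities: &corev1.Capabilities{
			Drop: []corev1.Capability{"ALL"},
			Add:  []corev1.Capability{"SYS_NICE"},
		},
	}
	out, _ := json.MarshalIndent(sc, "", "  ")
	fmt.Println(string(out))
}
```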
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 343115, - "endTime": "2023-09-04 09:18:58.816106528 -0500 CDT m=+28.351030702", - "failureLineContent": "\t\tfail(string(bytes))", - "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:367", - "skipReason": "{\"CompliantObjectsOut\":null,\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"xdp\",\"xdp-c\"]}]}", - "startTime": "2023-09-04 09:18:58.815763413 -0500 CDT m=+28.350687587", - "state": "failed", - "testID": { - "id": "access-control-sys-nice-realtime-capability", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "[container: test pod: test-0 ns: certsuite] has been found running on a realtime kernel enabled node without SYS_NICE capability.\n[container: test pod: test-1 ns: certsuite] has been found running on a realtime kernel enabled node without SYS_NICE capability.\n[container: test pod: test-765d6b8dcf-gbvsd ns: certsuite] has been found running on a realtime kernel enabled node without SYS_NICE capability.\n[container: test pod: test-765d6b8dcf-s768n ns: certsuite] has been found running on a realtime kernel enabled node without SYS_NICE capability.\n[container: xdp-c pod: xdp ns: certsuite] has been found running on a realtime kernel enabled node without SYS_NICE capability.\n{\"CompliantObjectsOut\":null,\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container 
Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"xdp\",\"xdp-c\"]}]}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-sys_nice", + "description": "Check that pods running on nodes with realtime kernel enabled have the SYS_NICE capability enabled in their spec. In the case that a CNF is running on a node using the real-time kernel, SYS_NICE will be used to allow DPDK application to switch to SCHED_FIFO.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "If pods are scheduled to realtime kernel nodes, they must add SYS_NICE capability to their spec." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 343115, + "endTime": "2023-09-04 09:18:58.816106528 -0500 CDT m=+28.351030702", + "failureLineContent": "\t\tfail(string(bytes))", + "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:367", + "skipReason": "{\"CompliantObjectsOut\":null,\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Container is running on a realtime kernel enabled node without SYS_NICE capability\",\"certsuite\",\"xdp\",\"xdp-c\"]}]}", + "startTime": "2023-09-04 09:18:58.815763413 -0500 CDT m=+28.350687587", + "state": "failed", + "testID": { + "id": "access-control-sys-nice-realtime-capability", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-sys-ptrace-capability": { - "capturedTestOutput": "", - "catalogInfo": { - 
"bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-sys_ptrace", - "description": "Check that if process namespace sharing is enabled for a Pod then the SYS_PTRACE capability is allowed. This capability is required when using Process Namespace Sharing. This is used when processes from one Container need to be exposed to another Container. For example, to send signals like SIGHUP from a process in a Container to another process in another Container. For more information on these capabilities refer to https://cloud.redhat.com/blog/linux-capabilities-in-openshift and https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Allow the SYS_PTRACE capability when enabling process namespace sharing for a Pod" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 238782, - "endTime": "2023-09-04 09:18:58.816543639 -0500 CDT m=+28.351467813", - "failureLineContent": "\t\t\tskip(fmt.Sprintf(\"Test skipped because there are no %s to test, please check under test labels\", reflect.TypeOf(o)))", - "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:321", - "skipReason": "Test skipped because there are no []*provider.Pod to test, please check under test labels", - "startTime": "2023-09-04 09:18:58.816304856 -0500 CDT m=+28.351229031", - "state": "skipped", - "testID": { - "id": "access-control-sys-ptrace-capability", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-sys_ptrace", + "description": "Check that if process namespace sharing is enabled for a Pod then the SYS_PTRACE capability is allowed. This capability is required when using Process Namespace Sharing. This is used when processes from one Container need to be exposed to another Container. For example, to send signals like SIGHUP from a process in a Container to another process in another Container. 
For more information on these capabilities refer to https://cloud.redhat.com/blog/linux-capabilities-in-openshift and https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Allow the SYS_PTRACE capability when enabling process namespace sharing for a Pod" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 238782, + "endTime": "2023-09-04 09:18:58.816543639 -0500 CDT m=+28.351467813", + "failureLineContent": "\t\t\tskip(fmt.Sprintf(\"Test skipped because there are no %s to test, please check under test labels\", reflect.TypeOf(o)))", + "failureLocation": "/home/greyerof/github/certsuite/pkg/testhelper/testhelper.go:321", + "skipReason": "Test skipped because there are no []*provider.Pod to test, please check under test labels", + "startTime": "2023-09-04 09:18:58.816304856 -0500 CDT m=+28.351229031", + "state": "skipped", + "testID": { + "id": "access-control-sys-ptrace-capability", + "suite": "access-control", + "tags": "telco" + } + }, "affiliated-certification-container-is-certified": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/certify-your-application/overview", - "description": "Tests whether container images listed in the configuration file have passed the Red Hat Container Certification Program (CCP).", - "exceptionProcess": "There is no documented exception process for this.Partner can run CNF Certification test suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in CNF Certification test suite must be re-run once the other certifications have been granted.", - "remediation": "Ensure that your container has passed the Red Hat Container Certification Program (CCP)." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:19:06.658351027 -0500 CDT m=+36.193275232", - "state": "skipped", - "testID": { - "id": "affiliated-certification-container-is-certified", - "suite": "affiliated-certification", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/certify-your-application/overview", + "description": "Tests whether container images listed in the configuration file have passed the Red Hat Container Certification Program (CCP).", + "exceptionProcess": "There is no documented exception process for this.Partner can run CNF Certification test suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in CNF Certification test suite must be re-run once the other certifications have been granted.", + "remediation": "Ensure that your container has passed the Red Hat Container Certification Program (CCP)." 
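Returning to `access-control-sys-ptrace-capability` above: the rule pairs `shareProcessNamespace: true` with an explicit SYS_PTRACE grant. A sketch with invented container and image names:

```go
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func ptr[T any](v T) *T { return &v }

func main() {
	// shareProcessNamespace exposes every container's processes to the
	// others, so signalling across containers needs SYS_PTRACE allowed.
	spec := corev1.PodSpec{
		ShareProcessNamespace: ptr(true),
		Containers: []corev1.Container{{
			Name:  "demo",                             // invented
			Image: "registry.example.com/demo:latest", // invented
			SecurityContext: &corev1.SecurityContext{
				Capabilities: &corev1.Capabilities{
					Drop: []corev1.Capability{"ALL"},
					Add:  []corev1.Capability{"SYS_PTRACE"},
				},
			},
		}},
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}
```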
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:19:06.658351027 -0500 CDT m=+36.193275232", + "state": "skipped", + "testID": { + "id": "affiliated-certification-container-is-certified", + "suite": "affiliated-certification", + "tags": "common" + } + }, "affiliated-certification-container-is-certified-digest": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/certify-your-application/overview", - "description": "Tests whether container images that are autodiscovered have passed the Red Hat Container Certification Program by their digest(CCP).", - "exceptionProcess": "There is no documented exception process for this.Partner can run CNF Certification test suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in CNF Certification test suite must be re-run once the other certifications have been granted.", - "remediation": "Ensure that your container has passed the Red Hat Container Certification Program (CCP)." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:19:06.658473669 -0500 CDT m=+36.193397844", - "state": "skipped", - "testID": { - "id": "affiliated-certification-container-is-certified-digest", - "suite": "affiliated-certification", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/certify-your-application/overview", + "description": "Tests whether container images that are autodiscovered have passed the Red Hat Container Certification Program by their digest(CCP).", + "exceptionProcess": "There is no documented exception process for this.Partner can run CNF Certification test suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in CNF Certification test suite must be re-run once the other certifications have been granted.", + "remediation": "Ensure that your container has passed the Red Hat Container Certification Program (CCP)." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:19:06.658473669 -0500 CDT m=+36.193397844", + "state": "skipped", + "testID": { + "id": "affiliated-certification-container-is-certified-digest", + "suite": "affiliated-certification", + "tags": "common" + } + }, "affiliated-certification-helm-version": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-helm", - "description": "Test to check if the helm chart is v3", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Check Helm Chart is v3 and not v2 which is not supported due to security risks associated with Tiller." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:19:06.658299035 -0500 CDT m=+36.193223210", - "state": "skipped", - "testID": { - "id": "affiliated-certification-helm-version", - "suite": "affiliated-certification", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-helm", + "description": "Test to check if the helm chart is v3", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Check Helm Chart is v3 and not v2 which is not supported due to security risks associated with Tiller." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:19:06.658299035 -0500 CDT m=+36.193223210", + "state": "skipped", + "testID": { + "id": "affiliated-certification-helm-version", + "suite": "affiliated-certification", + "tags": "common" + } + }, "affiliated-certification-helmchart-is-certified": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/certify-your-application/overview", - "description": "Tests whether helm charts listed in the cluster passed the Red Hat Helm Certification Program.", - "exceptionProcess": "There is no documented exception process for this.Partner can run CNF Certification test suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in CNF Certification test suite must be re-run once the other certifications have been granted.", - "remediation": "Ensure that the helm charts under test passed the Red Hat's helm Certification Program (e.g. listed in https://charts.openshift.io/index.yaml)." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:19:06.658444851 -0500 CDT m=+36.193369056", - "state": "skipped", - "testID": { - "id": "affiliated-certification-helmchart-is-certified", - "suite": "affiliated-certification", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/certify-your-application/overview", + "description": "Tests whether helm charts listed in the cluster passed the Red Hat Helm Certification Program.", + "exceptionProcess": "There is no documented exception process for this.Partner can run CNF Certification test suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in CNF Certification test suite must be re-run once the other certifications have been granted.", + "remediation": "Ensure that the helm charts under test passed the Red Hat's helm Certification Program (e.g. listed in https://charts.openshift.io/index.yaml)." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:19:06.658444851 -0500 CDT m=+36.193369056", + "state": "skipped", + "testID": { + "id": "affiliated-certification-helmchart-is-certified", + "suite": "affiliated-certification", + "tags": "common" + } + }, "affiliated-certification-operator-is-certified": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements", - "description": "Tests whether CNF Operators listed in the configuration file have passed the Red Hat Operator Certification Program (OCP).", - "exceptionProcess": "There is no documented exception process for this.Partner can run CNF Certification test suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in CNF Certification test suite must be re-run once the other certifications have been granted.", - "remediation": "Ensure that your Operator has passed Red Hat's Operator Certification Program (OCP)." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:19:06.658400499 -0500 CDT m=+36.193324704", - "state": "skipped", - "testID": { - "id": "affiliated-certification-operator-is-certified", - "suite": "affiliated-certification", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements", + "description": "Tests whether CNF Operators listed in the configuration file have passed the Red Hat Operator Certification Program (OCP).", + "exceptionProcess": "There is no documented exception process for this.Partner can run CNF Certification test suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in CNF Certification test suite must be re-run once the other certifications have been granted.", + "remediation": "Ensure that your Operator has passed Red Hat's Operator Certification Program (OCP)." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:19:06.658400499 -0500 CDT m=+36.193324704", + "state": "skipped", + "testID": { + "id": "affiliated-certification-operator-is-certified", + "suite": "affiliated-certification", + "tags": "common" + } + }, "lifecycle-affinity-required-pods": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", - "description": "Checks that affinity rules are in place if AffinityRequired: 'true' labels are set on Pods.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Pods which need to be co-located on the same node need Affinity rules. If a pod/statefulset/deployment is required to use affinity rules, please add AffinityRequired: 'true' as a label." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.56619026 -0500 CDT m=+20.101114435", - "state": "skipped", - "testID": { - "id": "lifecycle-affinity-required-pods", - "suite": "lifecycle", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", + "description": "Checks that affinity rules are in place if AffinityRequired: 'true' labels are set on Pods.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Pods which need to be co-located on the same node need Affinity rules. If a pod/statefulset/deployment is required to use affinity rules, please add AffinityRequired: 'true' as a label." 
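The `AffinityRequired: 'true'` label in the remediation above is a promise that real affinity rules exist. A sketch of a pod that keeps that promise by requiring co-location with pods labelled `app: test`; the selector, topology key, and names are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// The label advertises that affinity rules are intentional; the
	// Affinity stanza below is what the check expects to find with it.
	pod := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "affinity-demo", // invented
			Labels: map[string]string{"AffinityRequired": "true"},
		},
		Spec: corev1.PodSpec{
			Affinity: &corev1.Affinity{
				PodAffinity: &corev1.PodAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{
						LabelSelector: &metav1.LabelSelector{
							MatchLabels: map[string]string{"app": "test"},
						},
						TopologyKey: "kubernetes.io/hostname",
					}},
				},
			},
			Containers: []corev1.Container{{
				Name:  "demo",                             // invented
				Image: "registry.example.com/demo:latest", // invented
			}},
		},
	}
	out, _ := json.MarshalIndent(pod, "", "  ")
	fmt.Println(string(out))
}
```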
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.56619026 -0500 CDT m=+20.101114435",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-affinity-required-pods",
+        "suite": "lifecycle",
+        "tags": "telco"
+      }
+    },
     "lifecycle-container-shutdown": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cloud-native-design-best-practices",
-        "description": "Ensure that the containers lifecycle preStop management feature is configured. The most basic requirement for the lifecycle management of Pods in OpenShift are the ability to start and stop correctly. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. When pods are shut down by the platform they are sent a SIGTERM signal which means that the process in the container should start shutting down, closing connections and stopping all activity. If the pod doesn’t shut down within the default 30 seconds then the platform may send a SIGKILL signal which will stop the pod immediately. This method isn’t as clean and the default time between the SIGTERM and SIGKILL messages can be modified based on the requirements of the application. Containers should respond to SIGTERM/SIGKILL with graceful shutdown.",
-        "exceptionProcess": "Identify which pod is not conforming to the process and submit information as to why it cannot use a preStop shutdown specification.",
-        "remediation": "The preStop can be used to gracefully stop the container and clean resources (e.g., DB connection). For details, see https://www.containiq.com/post/kubernetes-container-lifecycle-events-and-hooks and https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. All pods must respond to SIGTERM signal and shutdown gracefully with a zero exit code."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565894087 -0500 CDT m=+20.100818277",
-      "state": "skipped",
-      "testID": {
-        "id": "lifecycle-container-shutdown",
-        "suite": "lifecycle",
-        "tags": "telco"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cloud-native-design-best-practices",
+        "description": "Ensure that the containers lifecycle preStop management feature is configured. The most basic requirement for the lifecycle management of Pods in OpenShift is the ability to start and stop correctly. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. When pods are shut down by the platform they are sent a SIGTERM signal which means that the process in the container should start shutting down, closing connections and stopping all activity. If the pod doesn’t shut down within the default 30 seconds then the platform may send a SIGKILL signal which will stop the pod immediately. This method isn’t as clean and the default time between the SIGTERM and SIGKILL messages can be modified based on the requirements of the application. Containers should respond to SIGTERM/SIGKILL with graceful shutdown.",
+        "exceptionProcess": "Identify which pod is not conforming to the process and submit information as to why it cannot use a preStop shutdown specification.",
+        "remediation": "The preStop can be used to gracefully stop the container and clean resources (e.g., DB connection). For details, see https://www.containiq.com/post/kubernetes-container-lifecycle-events-and-hooks and https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. All pods must respond to SIGTERM signal and shutdown gracefully with a zero exit code."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565894087 -0500 CDT m=+20.100818277",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-container-shutdown",
+        "suite": "lifecycle",
+        "tags": "telco"
+      }
+    },
     "lifecycle-container-startup": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cloud-native-design-best-practices",
-        "description": "Ensure that the containers lifecycle postStart management feature is configured. A container must receive important events from the platform and conform/react to these events properly. For example, a container should catch SIGTERM or SIGKILL from the platform and shutdown as quickly as possible. Other typically important events from the platform are PostStart to initialize before servicing requests and PreStop to release resources cleanly before shutting down.",
-        "exceptionProcess": "Identify which pod is not conforming to the process and submit information as to why it cannot use a postStart startup specification.",
-        "remediation": "PostStart is normally used to configure the container, set up dependencies, and record the new creation. You could use this event to check that a required API is available before the container’s main work begins. Kubernetes will not change the container’s state to Running until the PostStart script has executed successfully. For details, see https://www.containiq.com/post/kubernetes-container-lifecycle-events-and-hooks and https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. PostStart is used to configure container, set up dependencies, record new creation. It can also be used to check that a required API is available before the container’s work begins."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565931799 -0500 CDT m=+20.100855973",
-      "state": "skipped",
-      "testID": {
-        "id": "lifecycle-container-startup",
-        "suite": "lifecycle",
-        "tags": "telco"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cloud-native-design-best-practices",
+        "description": "Ensure that the containers lifecycle postStart management feature is configured. A container must receive important events from the platform and conform/react to these events properly. For example, a container should catch SIGTERM or SIGKILL from the platform and shutdown as quickly as possible. Other typically important events from the platform are PostStart to initialize before servicing requests and PreStop to release resources cleanly before shutting down.",
+        "exceptionProcess": "Identify which pod is not conforming to the process and submit information as to why it cannot use a postStart startup specification.",
+        "remediation": "PostStart is normally used to configure the container, set up dependencies, and record the new creation. You could use this event to check that a required API is available before the container’s main work begins. Kubernetes will not change the container’s state to Running until the PostStart script has executed successfully. For details, see https://www.containiq.com/post/kubernetes-container-lifecycle-events-and-hooks and https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565931799 -0500 CDT m=+20.100855973",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-container-startup",
+        "suite": "lifecycle",
+        "tags": "telco"
+      }
+    },
     "lifecycle-cpu-isolation": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cpu-isolation",
-        "description": "CPU isolation requires: For each container within the pod, resource requests and limits must be identical. Request and Limits are in the form of whole CPUs. The runTimeClassName must be specified. Annotations required disabling CPU and IRQ load-balancing.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "CPU isolation testing is enabled. Please ensure that all pods adhere to the CPU isolation requirements."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.566169662 -0500 CDT m=+20.101093852",
-      "state": "skipped",
-      "testID": {
-        "id": "lifecycle-cpu-isolation",
-        "suite": "lifecycle",
-        "tags": "telco"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cpu-isolation",
+        "description": "CPU isolation requires: For each container within the pod, resource requests and limits must be identical. Requests and limits are in the form of whole CPUs. The runTimeClassName must be specified. Annotations are required to disable CPU and IRQ load-balancing.",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "CPU isolation testing is enabled. Please ensure that all pods adhere to the CPU isolation requirements."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.566169662 -0500 CDT m=+20.101093852",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-cpu-isolation",
+        "suite": "lifecycle",
+        "tags": "telco"
+      }
+    },
     "lifecycle-crd-scaling": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
-        "description": "Tests that CNF crd support scale in/out operations. First, the test starts getting the current replicaCount (N) of the crd/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the crd/s. In case of crd that are managed by HPA the test is changing the min and max value to crd Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the crd/s",
-        "exceptionProcess": "There is no documented exception process for this. Not applicable to SNO applications.",
-        "remediation": "Ensure CNF crd/replica sets can scale in/out successfully."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Optional",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565912421 -0500 CDT m=+20.100836595",
-      "state": "skipped",
-      "testID": {
-        "id": "lifecycle-crd-scaling",
-        "suite": "lifecycle",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
+        "description": "Tests that CNF CRDs support scale in/out operations. First, the test starts by getting the current replicaCount (N) of the CRD/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the CRD/s. For CRDs that are managed by an HPA, the test changes the min and max values to the CRD replica count - 1 during scale-in and back to the original replicaCount for both min/max during the scale-out stage. Lastly, it restores the original min/max replicas of the CRD/s.",
+        "exceptionProcess": "There is no documented exception process for this. Not applicable to SNO applications.",
+        "remediation": "Ensure CNF CRD/replica sets can scale in/out successfully."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Optional",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565912421 -0500 CDT m=+20.100836595",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-crd-scaling",
+        "suite": "lifecycle",
+        "tags": "common"
+      }
+    },
     "lifecycle-deployment-scaling": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
-        "description": "Tests that CNF deployments support scale in/out operations. First, the test starts getting the current replicaCount (N) of the deployment/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the deployment/s. In case of deployments that are managed by HPA the test is changing the min and max value to deployment Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the deployment/s",
-        "exceptionProcess": "There is no documented exception process for this. Not applicable to SNO applications.",
-        "remediation": "Ensure CNF deployments/replica sets can scale in/out successfully."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Optional",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.566110476 -0500 CDT m=+20.101034651",
-      "state": "skipped",
-      "testID": {
-        "id": "lifecycle-deployment-scaling",
-        "suite": "lifecycle",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
+        "description": "Tests that CNF deployments support scale in/out operations. First, the test starts by getting the current replicaCount (N) of the deployment/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the deployment/s. For deployments that are managed by an HPA, the test changes the min and max values to the deployment replica count - 1 during scale-in and back to the original replicaCount for both min/max during the scale-out stage. Lastly, it restores the original min/max replicas of the deployment/s.",
+        "exceptionProcess": "There is no documented exception process for this. Not applicable to SNO applications.",
+        "remediation": "Ensure CNF deployments/replica sets can scale in/out successfully."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Optional",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.566110476 -0500 CDT m=+20.101034651",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-deployment-scaling",
+        "suite": "lifecycle",
+        "tags": "common"
+      }
+    },
     "lifecycle-image-pull-policy": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-use-imagepullpolicy-if-not-present",
-        "description": "Ensure that the containers under test are using IfNotPresent as Image Pull Policy. If there is a situation where the container dies and needs to be restarted, the image pull policy becomes important. PullIfNotPresent is recommended so that a loss of image registry access does not prevent the pod from restarting.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Ensure that the containers under test are using IfNotPresent as Image Pull Policy."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565946717 -0500 CDT m=+20.100870891",
-      "state": "skipped",
-      "testID": {
-        "id": "lifecycle-image-pull-policy",
-        "suite": "lifecycle",
-        "tags": "telco"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-use-imagepullpolicy-if-not-present",
+        "description": "Ensure that the containers under test are using IfNotPresent as Image Pull Policy. If there is a situation where the container dies and needs to be restarted, the image pull policy becomes important. PullIfNotPresent is recommended so that a loss of image registry access does not prevent the pod from restarting.",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Ensure that the containers under test are using IfNotPresent as Image Pull Policy."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565946717 -0500 CDT m=+20.100870891",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-image-pull-policy",
+        "suite": "lifecycle",
+        "tags": "telco"
+      }
+    },
     "lifecycle-liveness-probe": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
-        "description": "Check that all containers under test have liveness probe defined. The most basic requirement for the lifecycle management of Pods in OpenShift are the ability to start and stop correctly. When starting up, health probes like liveness and readiness checks can be put into place to ensure the application is functioning properly.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Add a liveness probe to deployed containers. CNFs shall self-recover from common failures like pod failure, host failure, and network failure. Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565994768 -0500 CDT m=+20.100918958",
-      "state": "skipped",
-      "testID": {
-        "id": "lifecycle-liveness-probe",
-        "suite": "lifecycle",
-        "tags": "telco"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
+        "description": "Check that all containers under test have liveness probe defined. The most basic requirement for the lifecycle management of Pods in OpenShift is the ability to start and stop correctly. When starting up, health probes like liveness and readiness checks can be put into place to ensure the application is functioning properly.",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Add a liveness probe to deployed containers. CNFs shall self-recover from common failures like pod failure, host failure, and network failure. Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565994768 -0500 CDT m=+20.100918958",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-liveness-probe",
+        "suite": "lifecycle",
+        "tags": "telco"
+      }
+    },
     "lifecycle-persistent-volume-reclaim-policy": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-csi",
-        "description": "Check that the persistent volumes the CNF pods are using have a reclaim policy of delete. Network Functions should clear persistent storage by deleting their PVs when removing their application from a cluster.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Ensure that all persistent volumes are using the reclaim policy: delete"
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.566148821 -0500 CDT m=+20.101072996",
-      "state": "skipped",
-      "testID": {
-        "id": "lifecycle-persistent-volume-reclaim-policy",
-        "suite": "lifecycle",
-        "tags": "telco"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-csi",
+        "description": "Check that the persistent volumes the CNF pods are using have a reclaim policy of delete. Network Functions should clear persistent storage by deleting their PVs when removing their application from a cluster.",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Ensure that all persistent volumes are using the reclaim policy: delete"
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.566148821 -0500 CDT m=+20.101072996",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-persistent-volume-reclaim-policy",
+        "suite": "lifecycle",
+        "tags": "telco"
+      }
+    },
     "lifecycle-pod-high-availability": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
-        "description": "Ensures that CNF Pods specify podAntiAffinity rules and replica value is set to more than 1.",
-        "exceptionProcess": "There is no documented exception process for this. Not applicable to SNO applications.",
-        "remediation": "In high availability cases, Pod podAntiAffinity rule should be specified for pod scheduling and pod replica value is set to more than 1 ."
},
-    "lifecycle-pod-owner-type": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-no-naked-pods",
-        "description": "Tests that CNF Pod(s) are deployed as part of a ReplicaSet(s)/StatefulSet(s).",
-        "exceptionProcess": "There is no documented exception process for this. Pods should not be deployed as DaemonSet or naked pods.",
-        "remediation": "Deploy the CNF using ReplicaSet/StatefulSet."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.566025512 -0500 CDT m=+20.100949687",
-      "state": "skipped",
-      "testID": {
-        "id": "lifecycle-pod-owner-type",
-        "suite": "lifecycle",
-        "tags": "telco"
-      }
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Optional",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.566045362 -0500 CDT m=+20.100969551",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-pod-high-availability",
+        "suite": "lifecycle",
+        "tags": "common"
+      }
+    },
+    "lifecycle-pod-owner-type": {
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-no-naked-pods",
+        "description": "Tests that CNF Pod(s) are deployed as part of a ReplicaSet(s)/StatefulSet(s).",
+        "exceptionProcess": "There is no documented exception process for this. Pods should not be deployed as DaemonSet or naked pods.",
+        "remediation": "Deploy the CNF using ReplicaSet/StatefulSet."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
+      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.566025512 -0500 CDT m=+20.100949687",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-pod-owner-type",
+        "suite": "lifecycle",
+        "tags": "telco"
+      }
+    },
     "lifecycle-pod-recreation": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-upgrade-expectations",
-        "description": "Tests that a CNF is configured to support High Availability. First, this test cordons and drains a Node that hosts the CNF Pod. Next, the test ensures that OpenShift can re-instantiate the Pod on another Node, and that the actual replica count matches the desired replica count.",
-        "exceptionProcess": "No exceptions - workloads should be able to be restarted/recreated.",
-        "remediation": "Ensure that CNF Pod(s) utilize a configuration that supports High Availability. Additionally, ensure that there are available Nodes in the OpenShift cluster that can be utilized in the event that a host Node fails."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.566092278 -0500 CDT m=+20.101016453",
-      "state": "skipped",
-      "testID": {
-        "id": "lifecycle-pod-recreation",
-        "suite": "lifecycle",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-upgrade-expectations",
+        "description": "Tests that a CNF is configured to support High Availability. First, this test cordons and drains a Node that hosts the CNF Pod. Next, the test ensures that OpenShift can re-instantiate the Pod on another Node, and that the actual replica count matches the desired replica count.",
+        "exceptionProcess": "No exceptions - workloads should be able to be restarted/recreated.",
+        "remediation": "Ensure that CNF Pod(s) utilize a configuration that supports High Availability. Additionally, ensure that there are available Nodes in the OpenShift cluster that can be utilized in the event that a host Node fails."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.566092278 -0500 CDT m=+20.101016453",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-pod-recreation",
+        "suite": "lifecycle",
+        "tags": "common"
+      }
+    },
     "lifecycle-pod-scheduling": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
-        "description": "Ensures that CNF Pods do not specify nodeSelector or nodeAffinity. In most cases, Pods should allow for instantiation on any underlying Node. CNFs shall not use node selectors nor taints/tolerations to assign pod location.",
-        "exceptionProcess": "Exception will only be considered if application requires specialized hardware. Must specify which container requires special hardware and why.",
-        "remediation": "In most cases, Pod's should not specify their host Nodes through nodeSelector or nodeAffinity. However, there are cases in which CNFs require specialized hardware specific to a particular class of Node."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.566073238 -0500 CDT m=+20.100997412",
-      "state": "skipped",
-      "testID": {
-        "id": "lifecycle-pod-scheduling",
-        "suite": "lifecycle",
-        "tags": "telco"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
+        "description": "Ensures that CNF Pods do not specify nodeSelector or nodeAffinity. In most cases, Pods should allow for instantiation on any underlying Node. CNFs shall not use node selectors nor taints/tolerations to assign pod location.",
+        "exceptionProcess": "Exception will only be considered if application requires specialized hardware. Must specify which container requires special hardware and why.",
+        "remediation": "In most cases, Pods should not specify their host Nodes through nodeSelector or nodeAffinity. However, there are cases in which CNFs require specialized hardware specific to a particular class of Node."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.566073238 -0500 CDT m=+20.100997412",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-pod-scheduling",
+        "suite": "lifecycle",
+        "tags": "telco"
+      }
+    },
     "lifecycle-pod-toleration-bypass": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-taints-and-tolerations",
-        "description": "Check that pods do not have NoExecute, PreferNoSchedule, or NoSchedule tolerations that have been modified from the default.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Do not allow pods to bypass the NoExecute, PreferNoSchedule, or NoSchedule tolerations that are default applied by Kubernetes."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.566224664 -0500 CDT m=+20.101148839",
-      "state": "skipped",
-      "testID": {
-        "id": "lifecycle-pod-toleration-bypass",
-        "suite": "lifecycle",
-        "tags": "telco"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-taints-and-tolerations",
+        "description": "Check that pods do not have NoExecute, PreferNoSchedule, or NoSchedule tolerations that have been modified from the default.",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Do not allow pods to bypass the NoExecute, PreferNoSchedule, or NoSchedule tolerations that are default applied by Kubernetes."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
+      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.566224664 -0500 CDT m=+20.101148839",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-pod-toleration-bypass",
+        "suite": "lifecycle",
+        "tags": "telco"
+      }
+    },
     "lifecycle-readiness-probe": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
-        "description": "Check that all containers under test have readiness probe defined. There are different ways a pod can stop on on OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Add a readiness probe to deployed containers"
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565977061 -0500 CDT m=+20.100901237",
-      "state": "skipped",
-      "testID": {
-        "id": "lifecycle-readiness-probe",
-        "suite": "lifecycle",
-        "tags": "telco"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
+        "description": "Check that all containers under test have readiness probe defined. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem.",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Add a readiness probe to deployed containers"
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565977061 -0500 CDT m=+20.100901237",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-readiness-probe",
+        "suite": "lifecycle",
+        "tags": "telco"
+      }
+    },
     "lifecycle-startup-probe": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-exit-status",
-        "description": "Check that all containers under test have startup probe defined. CNFs shall self-recover from common failures like pod failure, host failure, and network failure. Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Add a startup probe to deployed containers"
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.566011546 -0500 CDT m=+20.100935721",
-      "state": "skipped",
-      "testID": {
-        "id": "lifecycle-startup-probe",
-        "suite": "lifecycle",
-        "tags": "telco"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-exit-status",
+        "description": "Check that all containers under test have startup probe defined. CNFs shall self-recover from common failures like pod failure, host failure, and network failure. Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum.",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Add a startup probe to deployed containers"
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.566011546 -0500 CDT m=+20.100935721",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-startup-probe",
+        "suite": "lifecycle",
+        "tags": "telco"
+      }
+    },
     "lifecycle-statefulset-scaling": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
-        "description": "Tests that CNF statefulsets support scale in/out operations. First, the test starts getting the current replicaCount (N) of the statefulset/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the statefulset/s. In case of statefulsets that are managed by HPA the test is changing the min and max value to statefulset Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the statefulset/s",
-        "exceptionProcess": "There is no documented exception process for this. Not applicable to SNO applications.",
-        "remediation": "Ensure CNF statefulsets/replica sets can scale in/out successfully."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Optional",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.566129433 -0500 CDT m=+20.101053606",
-      "state": "skipped",
-      "testID": {
-        "id": "lifecycle-statefulset-scaling",
-        "suite": "lifecycle",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
+        "description": "Tests that CNF statefulsets support scale in/out operations. First, the test starts by getting the current replicaCount (N) of the statefulset/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the statefulset/s. For statefulsets that are managed by an HPA, the test changes the min and max values to the statefulset replica count - 1 during scale-in and back to the original replicaCount for both min/max during the scale-out stage. Lastly, it restores the original min/max replicas of the statefulset/s.",
+        "exceptionProcess": "There is no documented exception process for this. Not applicable to SNO applications.",
+        "remediation": "Ensure CNF statefulsets/replica sets can scale in/out successfully."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Optional",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
+      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.566129433 -0500 CDT m=+20.101053606",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-statefulset-scaling",
+        "suite": "lifecycle",
+        "tags": "common"
+      }
+    },
     "lifecycle-storage-required-pods": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-local-storage",
-        "description": "Checks that pods do not place persistent volumes on local storage.",
-        "exceptionProcess": "No exceptions",
-        "remediation": "If pod is StatefulSet, make sure servicename is not local-storage (persistent volumes should not be on local storage)."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.566243675 -0500 CDT m=+20.101167865",
-      "state": "skipped",
-      "testID": {
-        "id": "lifecycle-storage-required-pods",
-        "suite": "lifecycle",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-local-storage",
+        "description": "Checks that pods do not place persistent volumes on local storage.",
+        "exceptionProcess": "No exceptions",
+        "remediation": "If the pod is a StatefulSet, make sure the serviceName is not local-storage (persistent volumes should not be on local storage)."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.566243675 -0500 CDT m=+20.101167865",
+      "state": "skipped",
+      "testID": {
+        "id": "lifecycle-storage-required-pods",
+        "suite": "lifecycle",
+        "tags": "common"
+      }
+    },
     "manageability-container-port-name-format": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs",
-        "description": "Check that the container's ports name follow the naming conventions. Name field in ContainerPort section must be of form `\u003cprotocol\u003e[-\u003csuffix\u003e]`. More naming convention requirements may be released in future",
-        "exceptionProcess": "No exception needed for optional/extended tests.",
-        "remediation": "Ensure that the container's ports name follow our partner naming conventions"
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Optional",
-        "NonTelco": "Optional",
-        "Telco": "Optional"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:19:06.658615636 -0500 CDT m=+36.193539811",
-      "state": "skipped",
-      "testID": {
-        "id": "manageability-container-port-name-format",
-        "suite": "manageability",
-        "tags": "extended"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs",
+        "description": "Check that the container's port names follow the naming conventions. Name field in ContainerPort section must be of form `\u003cprotocol\u003e[-\u003csuffix\u003e]`. More naming convention requirements may be released in the future",
+        "exceptionProcess": "No exception needed for optional/extended tests.",
+        "remediation": "Ensure that the container's port names follow our partner naming conventions"
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Optional",
+        "NonTelco": "Optional",
+        "Telco": "Optional"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:19:06.658615636 -0500 CDT m=+36.193539811",
+      "state": "skipped",
+      "testID": {
+        "id": "manageability-container-port-name-format",
+        "suite": "manageability",
+        "tags": "extended"
+      }
+    },
     "manageability-containers-image-tag": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-image-tagging",
-        "description": "Check that image tag exists on containers.",
-        "exceptionProcess": "No exception needed for optional/extended tests.",
-        "remediation": "Ensure that all the container images are tagged. Checks containers have image tags (e.g. latest, stable, dev)."
-      },
-      "categoryClassification": {
-        "Extended": "Optional",
-        "FarEdge": "Optional",
-        "NonTelco": "Optional",
-        "Telco": "Optional"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:19:06.658596375 -0500 CDT m=+36.193520551",
-      "state": "skipped",
-      "testID": {
-        "id": "manageability-containers-image-tag",
-        "suite": "manageability",
-        "tags": "extended"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-image-tagging",
+        "description": "Check that image tag exists on containers.",
+        "exceptionProcess": "No exception needed for optional/extended tests.",
+        "remediation": "Ensure that all the container images are tagged. Checks that containers have image tags (e.g. latest, stable, dev)."
       },
-    "networking-dpdk-cpu-pinning-exec-probe": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cpu-manager-pinning",
-        "description": "If a CNF is doing CPU pinning, exec probes may not be used.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "If the CNF is doing CPU pinning and running a DPDK process do not use exec probes (executing a command within the container) as it may pile up and block the node eventually."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.56585899 -0500 CDT m=+20.100783165",
-      "state": "skipped",
-      "testID": {
-        "id": "networking-dpdk-cpu-pinning-exec-probe",
-        "suite": "networking",
-        "tags": "telco"
-      }
+      "categoryClassification": {
+        "Extended": "Optional",
+        "FarEdge": "Optional",
+        "NonTelco": "Optional",
+        "Telco": "Optional"
+      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:19:06.658596375 -0500 CDT m=+36.193520551",
+      "state": "skipped",
+      "testID": {
+        "id": "manageability-containers-image-tag",
+        "suite": "manageability",
+        "tags": "extended"
+      }
+    },
+    "performance-cpu-pinning-no-exec-probes": {
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cpu-manager-pinning",
+        "description": "Workloads utilizing CPU pinning (Guaranteed QoS with exclusive CPUs) should not use exec probes. Exec probes run a command within the container, which could interfere with latency-sensitive workloads and cause performance degradation.",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Workloads that use CPU pinning (Guaranteed QoS with exclusive CPUs) should not use exec probes. Use httpGet or tcpSocket probes instead, as exec probes can interfere with latency-sensitive workloads requiring non-interruptible task execution."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.56585899 -0500 CDT m=+20.100783165",
+      "state": "skipped",
+      "testID": {
+        "id": "performance-cpu-pinning-no-exec-probes",
+        "suite": "performance",
+        "tags": "telco"
+      }
+    },
     "networking-dual-stack-service": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-\u0026-ipv6",
-        "description": "Checks that all services in namespaces under test are either ipv6 single stack or dual stack. This test case requires the deployment of the probe daemonset.",
-        "exceptionProcess": "No exception needed for optional/extended tests.",
-        "remediation": "Configure every CNF services with either a single stack ipv6 or dual stack (ipv4/ipv6) load balancer."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Optional",
-        "NonTelco": "Optional",
-        "Telco": "Optional"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565804542 -0500 CDT m=+20.100728718",
-      "state": "skipped",
-      "testID": {
-        "id": "networking-dual-stack-service",
-        "suite": "networking",
-        "tags": "extended"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-\u0026-ipv6",
+        "description": "Checks that all services in namespaces under test are either ipv6 single stack or dual stack. This test case requires the deployment of the probe daemonset.",
+        "exceptionProcess": "No exception needed for optional/extended tests.",
+        "remediation": "Configure every CNF service with either a single stack ipv6 or dual stack (ipv4/ipv6) load balancer."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Optional",
+        "NonTelco": "Optional",
+        "Telco": "Optional"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565804542 -0500 CDT m=+20.100728718",
+      "state": "skipped",
+      "testID": {
+        "id": "networking-dual-stack-service",
+        "suite": "networking",
+        "tags": "extended"
+      }
+    },
     "networking-icmpv4-connectivity": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-\u0026-ipv6",
-        "description": "Checks that each CNF Container is able to communicate via ICMPv4 on the Default OpenShift network. This test case requires the Deployment of the probe daemonset.",
-        "exceptionProcess": "No exceptions - must be able to communicate on default network using IPv4",
-        "remediation": "Ensure that the CNF is able to communicate via the Default OpenShift network. In some rare cases, CNFs may require routing table changes in order to communicate over the Default network. To exclude a particular pod from ICMPv4 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.5656687 -0500 CDT m=+20.100592905",
-      "state": "skipped",
-      "testID": {
-        "id": "networking-icmpv4-connectivity",
-        "suite": "networking",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-\u0026-ipv6",
+        "description": "Checks that each CNF Container is able to communicate via ICMPv4 on the Default OpenShift network. This test case requires the Deployment of the probe daemonset.",
+        "exceptionProcess": "No exceptions - must be able to communicate on default network using IPv4",
+        "remediation": "Ensure that the CNF is able to communicate via the Default OpenShift network. In some rare cases, CNFs may require routing table changes in order to communicate over the Default network. To exclude a particular pod from ICMPv4 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence."
       },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
+      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.5656687 -0500 CDT m=+20.100592905",
+      "state": "skipped",
+      "testID": {
+        "id": "networking-icmpv4-connectivity",
+        "suite": "networking",
+        "tags": "common"
+      }
+    },
     "networking-icmpv4-connectivity-multus": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
-        "description": "Checks that each CNF Container is able to communicate via ICMPv4 on the Multus network(s). This test case requires the Deployment of the probe daemonset.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Ensure that the CNF is able to communicate via the Multus network(s). In some rare cases, CNFs may require routing table changes in order to communicate over the Multus network(s). To exclude a particular pod from ICMPv4 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. Not applicable if MULTUS is not supported."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565694716 -0500 CDT m=+20.100618891",
-      "state": "skipped",
-      "testID": {
-        "id": "networking-icmpv4-connectivity-multus",
-        "suite": "networking",
-        "tags": "telco"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
+        "description": "Checks that each CNF Container is able to communicate via ICMPv4 on the Multus network(s). This test case requires the Deployment of the probe daemonset.",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Ensure that the CNF is able to communicate via the Multus network(s). In some rare cases, CNFs may require routing table changes in order to communicate over the Multus network(s). To exclude a particular pod from ICMPv4 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. Not applicable if MULTUS is not supported."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
       },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565694716 -0500 CDT m=+20.100618891",
+      "state": "skipped",
+      "testID": {
+        "id": "networking-icmpv4-connectivity-multus",
+        "suite": "networking",
+        "tags": "telco"
+      }
+    },
     "networking-icmpv6-connectivity": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-\u0026-ipv6",
-        "description": "Checks that each CNF Container is able to communicate via ICMPv6 on the Default OpenShift network. This test case requires the Deployment of the probe daemonset.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Ensure that the CNF is able to communicate via the Default OpenShift network. In some rare cases, CNFs may require routing table changes in order to communicate over the Default network. To exclude a particular pod from ICMPv6 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. Not applicable if IPv6 is not supported."
    "networking-icmpv6-connectivity": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-\u0026-ipv6",
-        "description": "Checks that each CNF Container is able to communicate via ICMPv6 on the Default OpenShift network. This test case requires the Deployment of the probe daemonset.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Ensure that the CNF is able to communicate via the Default OpenShift network. In some rare cases, CNFs may require routing table changes in order to communicate over the Default network. To exclude a particular pod from ICMPv6 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. Not applicable if IPv6 is not supported."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565721905 -0500 CDT m=+20.100646081",
-      "state": "skipped",
-      "testID": {
-        "id": "networking-icmpv6-connectivity",
-        "suite": "networking",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-\u0026-ipv6",
+        "description": "Checks that each CNF Container is able to communicate via ICMPv6 on the Default OpenShift network. This test case requires the Deployment of the probe daemonset.",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Ensure that the CNF is able to communicate via the Default OpenShift network. In some rare cases, CNFs may require routing table changes in order to communicate over the Default network. To exclude a particular pod from ICMPv6 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. Not applicable if IPv6 is not supported."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565721905 -0500 CDT m=+20.100646081",
+      "state": "skipped",
+      "testID": {
+        "id": "networking-icmpv6-connectivity",
+        "suite": "networking",
+        "tags": "common"
+      }
+    },
    "networking-icmpv6-connectivity-multus": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
-        "description": "Checks that each CNF Container is able to communicate via ICMPv6 on the Multus network(s). This test case requires the Deployment of the probe daemonset.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Ensure that the CNF is able to communicate via the Multus network(s). In some rare cases, CNFs may require routing table changes in order to communicate over the Multus network(s). To exclude a particular pod from ICMPv6 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. Not applicable if IPv6/MULTUS is not supported."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565742206 -0500 CDT m=+20.100666381",
-      "state": "skipped",
-      "testID": {
-        "id": "networking-icmpv6-connectivity-multus",
-        "suite": "networking",
-        "tags": "telco"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
+        "description": "Checks that each CNF Container is able to communicate via ICMPv6 on the Multus network(s). This test case requires the Deployment of the probe daemonset.",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Ensure that the CNF is able to communicate via the Multus network(s). In some rare cases, CNFs may require routing table changes in order to communicate over the Multus network(s). To exclude a particular pod from ICMPv6 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. Not applicable if IPv6/MULTUS is not supported."
      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
+      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565742206 -0500 CDT m=+20.100666381",
+      "state": "skipped",
+      "testID": {
+        "id": "networking-icmpv6-connectivity-multus",
+        "suite": "networking",
+        "tags": "telco"
+      }
+    },
    "networking-network-policy-deny-all": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-vrfs-aka-routing-instances",
-        "description": "Check that network policies attached to namespaces running CNF pods contain a default deny-all rule for both ingress and egress traffic",
-        "exceptionProcess": "No exception needed for optional/extended tests.",
-        "remediation": "Ensure that a NetworkPolicy with a default deny-all is applied. After the default is applied, apply a network policy to allow the traffic your application requires."
-      },
-      "categoryClassification": {
-        "Extended": "Optional",
-        "FarEdge": "Optional",
-        "NonTelco": "Optional",
-        "Telco": "Optional"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565825618 -0500 CDT m=+20.100749791",
-      "state": "skipped",
-      "testID": {
-        "id": "networking-network-policy-deny-all",
-        "suite": "networking",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-vrfs-aka-routing-instances",
+        "description": "Check that network policies attached to namespaces running CNF pods contain a default deny-all rule for both ingress and egress traffic",
+        "exceptionProcess": "No exception needed for optional/extended tests.",
+        "remediation": "Ensure that a NetworkPolicy with a default deny-all is applied. After the default is applied, apply a network policy to allow the traffic your application requires."
+      },
+      "categoryClassification": {
+        "Extended": "Optional",
+        "FarEdge": "Optional",
+        "NonTelco": "Optional",
+        "Telco": "Optional"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565825618 -0500 CDT m=+20.100749791",
+      "state": "skipped",
+      "testID": {
+        "id": "networking-network-policy-deny-all",
+        "suite": "networking",
+        "tags": "common"
+      }
+    },
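The deny-all remediation above can be sketched as follows (policy name and namespace are hypothetical); an empty podSelector with both policyTypes listed and no rules denies all ingress and egress, after which narrower allow policies are layered on:

```yaml
# Hypothetical default deny-all policy for a CNF namespace.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all               # hypothetical name
  namespace: example-cnf               # hypothetical namespace
spec:
  podSelector: {}                      # empty selector: applies to every pod in the namespace
  policyTypes:
    - Ingress
    - Egress
  # No ingress or egress rules are listed, so all traffic is denied by default.
```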
    "networking-ocp-reserved-ports-usage": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ports-reserved-by-openshift",
-        "description": "Check that containers do not listen on ports that are reserved by OpenShift",
-        "exceptionProcess": "No exceptions",
-        "remediation": "Ensure that CNF apps do not listen on ports that are reserved by OpenShift. The following ports are reserved by OpenShift and must NOT be used by any application: 22623, 22624."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565786079 -0500 CDT m=+20.100710254",
-      "state": "skipped",
-      "testID": {
-        "id": "networking-ocp-reserved-ports-usage",
-        "suite": "networking",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ports-reserved-by-openshift",
+        "description": "Check that containers do not listen on ports that are reserved by OpenShift",
+        "exceptionProcess": "No exceptions",
+        "remediation": "Ensure that CNF apps do not listen on ports that are reserved by OpenShift. The following ports are reserved by OpenShift and must NOT be used by any application: 22623, 22624."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565786079 -0500 CDT m=+20.100710254",
+      "state": "skipped",
+      "testID": {
+        "id": "networking-ocp-reserved-ports-usage",
+        "suite": "networking",
+        "tags": "common"
+      }
+    },
    "networking-reserved-partner-ports": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "No Doc Link - Extended",
-        "description": "Checks that pods and containers are not consuming ports designated as reserved by partner",
-        "exceptionProcess": "No exception needed for optional/extended tests.",
-        "remediation": "Ensure ports are not being used that are reserved by our partner"
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Optional",
-        "NonTelco": "Optional",
-        "Telco": "Optional"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565843941 -0500 CDT m=+20.100768115",
-      "state": "skipped",
-      "testID": {
-        "id": "networking-reserved-partner-ports",
-        "suite": "networking",
-        "tags": "extended"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "No Doc Link - Extended",
+        "description": "Checks that pods and containers are not consuming ports designated as reserved by partner",
+        "exceptionProcess": "No exception needed for optional/extended tests.",
+        "remediation": "Ensure ports are not being used that are reserved by our partner"
      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Optional",
+        "NonTelco": "Optional",
+        "Telco": "Optional"
+      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565843941 -0500 CDT m=+20.100768115",
+      "state": "skipped",
+      "testID": {
+        "id": "networking-reserved-partner-ports",
+        "suite": "networking",
+        "tags": "extended"
+      }
+    },
    "networking-restart-on-reboot-sriov-pod": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "No Doc Link - Far Edge",
-        "description": "Ensures that the label restart-on-reboot exists on pods that use SRIOV network interfaces.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Ensure that the label restart-on-reboot exists on pods that use SRIOV network interfaces."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Optional"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565876872 -0500 CDT m=+20.100801048",
-      "state": "skipped",
-      "testID": {
-        "id": "networking-restart-on-reboot-sriov-pod",
-        "suite": "networking",
-        "tags": "faredge"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "No Doc Link - Far Edge",
+        "description": "Ensures that the label restart-on-reboot exists on pods that use SRIOV network interfaces.",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Ensure that the label restart-on-reboot exists on pods that use SRIOV network interfaces."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Optional"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565876872 -0500 CDT m=+20.100801048",
+      "state": "skipped",
+      "testID": {
+        "id": "networking-restart-on-reboot-sriov-pod",
+        "suite": "networking",
+        "tags": "faredge"
+      }
+    },
    "networking-undeclared-container-ports-usage": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs",
-        "description": "Check that containers do not listen on ports that weren't declared in their specification. Platforms may be configured to block undeclared ports.",
-        "exceptionProcess": "No exception needed for optional/extended tests.",
-        "remediation": "Ensure the CNF apps do not listen on undeclared containers' ports."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Optional",
-        "NonTelco": "Optional",
-        "Telco": "Optional"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565766442 -0500 CDT m=+20.100690615",
-      "state": "skipped",
-      "testID": {
-        "id": "networking-undeclared-container-ports-usage",
-        "suite": "networking",
-        "tags": "extended"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs",
+        "description": "Check that containers do not listen on ports that weren't declared in their specification. Platforms may be configured to block undeclared ports.",
+        "exceptionProcess": "No exception needed for optional/extended tests.",
+        "remediation": "Ensure the CNF apps do not listen on undeclared containers' ports."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Optional",
+        "NonTelco": "Optional",
+        "Telco": "Optional"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565766442 -0500 CDT m=+20.100690615",
+      "state": "skipped",
+      "testID": {
+        "id": "networking-undeclared-container-ports-usage",
+        "suite": "networking",
+        "tags": "extended"
+      }
+    },
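To illustrate the declared-ports expectation, a minimal sketch (names and image hypothetical): every port a container actually listens on should appear in its ports list:

```yaml
# Hypothetical container declaring the single port it listens on.
apiVersion: v1
kind: Pod
metadata:
  name: example-cnf-pod                # hypothetical name
spec:
  containers:
    - name: app
      image: registry.example.com/cnf/app:1.0   # hypothetical image
      ports:
        - name: http
          containerPort: 8080          # declared, matching what the process binds
          protocol: TCP
```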
    "observability-container-logging": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-logging",
-        "description": "Check that all containers under test use standard input output and standard error when logging. A container must provide APIs for the platform to observe the container health and act accordingly. These APIs include health checks (liveness and readiness), logging to stderr and stdout for log aggregation (by tools such as Logstash or Filebeat), and integrate with tracing and metrics-gathering libraries (such as Prometheus or Metricbeat).",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Ensure containers are not redirecting stdout/stderr"
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565224144 -0500 CDT m=+20.100148319",
-      "state": "skipped",
-      "testID": {
-        "id": "observability-container-logging",
-        "suite": "observability",
-        "tags": "telco"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-logging",
+        "description": "Check that all containers under test use standard input output and standard error when logging. A container must provide APIs for the platform to observe the container health and act accordingly. These APIs include health checks (liveness and readiness), logging to stderr and stdout for log aggregation (by tools such as Logstash or Filebeat), and integrate with tracing and metrics-gathering libraries (such as Prometheus or Metricbeat).",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Ensure containers are not redirecting stdout/stderr"
      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
+      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565224144 -0500 CDT m=+20.100148319",
+      "state": "skipped",
+      "testID": {
+        "id": "observability-container-logging",
+        "suite": "observability",
+        "tags": "telco"
+      }
+    },
    "observability-crd-status": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements",
-        "description": "Checks that all CRDs have a status sub-resource specification (Spec.versions[].Schema.OpenAPIV3Schema.Properties[“status”]).",
-        "exceptionProcess": "No exceptions",
-        "remediation": "Ensure that all the CRDs have a meaningful status specification (Spec.versions[].Schema.OpenAPIV3Schema.Properties[“status”])."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565256113 -0500 CDT m=+20.100180288",
-      "state": "skipped",
-      "testID": {
-        "id": "observability-crd-status",
-        "suite": "observability",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements",
+        "description": "Checks that all CRDs have a status sub-resource specification (Spec.versions[].Schema.OpenAPIV3Schema.Properties[“status”]).",
+        "exceptionProcess": "No exceptions",
+        "remediation": "Ensure that all the CRDs have a meaningful status specification (Spec.versions[].Schema.OpenAPIV3Schema.Properties[“status”])."
      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
+      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565256113 -0500 CDT m=+20.100180288",
+      "state": "skipped",
+      "testID": {
+        "id": "observability-crd-status",
+        "suite": "observability",
+        "tags": "common"
+      }
+    },
    "observability-pod-disruption-budget": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-upgrade-expectations",
-        "description": "Checks to see if pod disruption budgets have allowed values for minAvailable and maxUnavailable",
-        "exceptionProcess": "No exceptions",
-        "remediation": "Ensure minAvailable is not zero and maxUnavailable does not equal the number of pods in the replica"
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565303849 -0500 CDT m=+20.100228023",
-      "state": "skipped",
-      "testID": {
-        "id": "observability-pod-disruption-budget",
-        "suite": "observability",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-upgrade-expectations",
+        "description": "Checks to see if pod disruption budgets have allowed values for minAvailable and maxUnavailable",
+        "exceptionProcess": "No exceptions",
+        "remediation": "Ensure minAvailable is not zero and maxUnavailable does not equal the number of pods in the replica"
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565303849 -0500 CDT m=+20.100228023",
+      "state": "skipped",
+      "testID": {
+        "id": "observability-pod-disruption-budget",
+        "suite": "observability",
+        "tags": "common"
+      }
+    },
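A minimal PodDisruptionBudget sketch consistent with the remediation above (name and selector hypothetical); minAvailable is non-zero, and maxUnavailable is simply not used:

```yaml
# Hypothetical PDB with a non-zero minAvailable.
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: example-cnf-pdb                # hypothetical name
spec:
  minAvailable: 1                      # not zero, as the test requires
  selector:
    matchLabels:
      app: example-cnf                 # hypothetical pod label
```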
    "observability-termination-policy": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-exit-status",
-        "description": "Check that all containers are using terminationMessagePolicy: FallbackToLogsOnError. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Ensure containers are all using FallbackToLogsOnError in terminationMessagePolicy"
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565277313 -0500 CDT m=+20.100201488",
-      "state": "skipped",
-      "testID": {
-        "id": "observability-termination-policy",
-        "suite": "observability",
-        "tags": "telco"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-exit-status",
+        "description": "Check that all containers are using terminationMessagePolicy: FallbackToLogsOnError. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem.",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Ensure containers are all using FallbackToLogsOnError in terminationMessagePolicy"
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Mandatory"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565277313 -0500 CDT m=+20.100201488",
+      "state": "skipped",
+      "testID": {
+        "id": "observability-termination-policy",
+        "suite": "observability",
+        "tags": "telco"
+      }
+    },
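As a minimal sketch of the termination-policy remediation (names and image hypothetical), the policy is set per container:

```yaml
# Hypothetical container using FallbackToLogsOnError, so that the last
# log lines become the termination message when no message file is written.
apiVersion: v1
kind: Pod
metadata:
  name: example-cnf-pod                # hypothetical name
spec:
  containers:
    - name: app
      image: registry.example.com/cnf/app:1.0   # hypothetical image
      terminationMessagePolicy: FallbackToLogsOnError
```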
    "operator-install-source": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements",
-        "description": "Tests whether a CNF Operator is installed via OLM.",
-        "exceptionProcess": "No exceptions",
-        "remediation": "Ensure that your Operator is installed via OLM."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:19:06.658551609 -0500 CDT m=+36.193475785",
-      "state": "skipped",
-      "testID": {
-        "id": "operator-install-source",
-        "suite": "operator",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements",
+        "description": "Tests whether a CNF Operator is installed via OLM.",
+        "exceptionProcess": "No exceptions",
+        "remediation": "Ensure that your Operator is installed via OLM."
      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
+      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:19:06.658551609 -0500 CDT m=+36.193475785",
+      "state": "skipped",
+      "testID": {
+        "id": "operator-install-source",
+        "suite": "operator",
+        "tags": "common"
+      }
+    },
    "operator-install-status-no-privileges": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements",
-        "description": "The operator is not installed with privileged rights. Test passes if clusterPermissions is not present in the CSV manifest or is present with no resourceNames under its rules.",
-        "exceptionProcess": "No exceptions",
-        "remediation": "Ensure all the CNF operators have no privileges on cluster resources."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:19:06.658523468 -0500 CDT m=+36.193447644",
-      "state": "skipped",
-      "testID": {
-        "id": "operator-install-status-no-privileges",
-        "suite": "operator",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements",
+        "description": "The operator is not installed with privileged rights. Test passes if clusterPermissions is not present in the CSV manifest or is present with no resourceNames under its rules.",
+        "exceptionProcess": "No exceptions",
+        "remediation": "Ensure all the CNF operators have no privileges on cluster resources."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:19:06.658523468 -0500 CDT m=+36.193447644",
+      "state": "skipped",
+      "testID": {
+        "id": "operator-install-status-no-privileges",
+        "suite": "operator",
+        "tags": "common"
+      }
+    },
    "operator-install-status-succeeded": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements",
-        "description": "Ensures that the target CNF operators report \"Succeeded\" as their installation status.",
-        "exceptionProcess": "No exceptions",
-        "remediation": "Ensure all the CNF operators have been successfully installed by OLM."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:19:06.65849962 -0500 CDT m=+36.193423795",
-      "state": "skipped",
-      "testID": {
-        "id": "operator-install-status-succeeded",
-        "suite": "operator",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements",
+        "description": "Ensures that the target CNF operators report \"Succeeded\" as their installation status.",
+        "exceptionProcess": "No exceptions",
+        "remediation": "Ensure all the CNF operators have been successfully installed by OLM."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:19:06.65849962 -0500 CDT m=+36.193423795",
+      "state": "skipped",
+      "testID": {
+        "id": "operator-install-status-succeeded",
+        "suite": "operator",
+        "tags": "common"
+      }
+    },
    "performance-exclusive-cpu-pool": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "No Doc Link - Far Edge",
-        "description": "Ensures that if one container in a Pod selects an exclusive CPU pool the rest select the same type of CPU pool",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Ensure that if one container in a Pod selects an exclusive CPU pool the rest also select this type of CPU pool"
-      },
-      "categoryClassification": {
-        "Extended": "Optional",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Optional"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.5649692 -0500 CDT m=+20.099893375",
-      "state": "skipped",
-      "testID": {
-        "id": "performance-exclusive-cpu-pool",
-        "suite": "performance",
-        "tags": "faredge"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "No Doc Link - Far Edge",
+        "description": "Ensures that if one container in a Pod selects an exclusive CPU pool the rest select the same type of CPU pool",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Ensure that if one container in a Pod selects an exclusive CPU pool the rest also select this type of CPU pool"
      },
+      "categoryClassification": {
+        "Extended": "Optional",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Optional"
+      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.5649692 -0500 CDT m=+20.099893375",
+      "state": "skipped",
+      "testID": {
+        "id": "performance-exclusive-cpu-pool",
+        "suite": "performance",
+        "tags": "faredge"
+      }
+    },
    "performance-exclusive-cpu-pool-rt-scheduling-policy": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "No Doc Link - Far Edge",
-        "description": "Ensures that if application workload runs in exclusive CPU pool, it chooses RT CPU schedule policy and set the priority less than 10.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Ensure that the workload running in Application exclusive CPU pool can choose RT CPU scheduling policy, but should set priority less than 10"
-      },
-      "categoryClassification": {
-        "Extended": "Optional",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Optional"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565142204 -0500 CDT m=+20.100066402",
-      "state": "skipped",
-      "testID": {
-        "id": "performance-exclusive-cpu-pool-rt-scheduling-policy",
-        "suite": "performance",
-        "tags": "faredge"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "No Doc Link - Far Edge",
+        "description": "Ensures that if application workload runs in exclusive CPU pool, it chooses RT CPU schedule policy and set the priority less than 10.",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Ensure that the workload running in Application exclusive CPU pool can choose RT CPU scheduling policy, but should set priority less than 10"
+      },
+      "categoryClassification": {
+        "Extended": "Optional",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Optional"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565142204 -0500 CDT m=+20.100066402",
+      "state": "skipped",
+      "testID": {
+        "id": "performance-exclusive-cpu-pool-rt-scheduling-policy",
+        "suite": "performance",
+        "tags": "faredge"
+      }
+    },
    "performance-isolated-cpu-pool-rt-scheduling-policy": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "No Doc Link - Far Edge",
-        "description": "Ensures that a workload running in an application-isolated exclusive CPU pool selects a RT CPU scheduling policy",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Ensure that the workload running in an application-isolated exclusive CPU pool selects a RT CPU scheduling policy (such as SCHED_FIFO/SCHED_RR) with High priority."
-      },
-      "categoryClassification": {
-        "Extended": "Optional",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Optional"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565202191 -0500 CDT m=+20.100126368",
-      "state": "skipped",
-      "testID": {
-        "id": "performance-isolated-cpu-pool-rt-scheduling-policy",
-        "suite": "performance",
-        "tags": "faredge"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "No Doc Link - Far Edge",
+        "description": "Ensures that a workload running in an application-isolated exclusive CPU pool selects a RT CPU scheduling policy",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Ensure that the workload running in an application-isolated exclusive CPU pool selects a RT CPU scheduling policy (such as SCHED_FIFO/SCHED_RR) with High priority."
+      },
+      "categoryClassification": {
+        "Extended": "Optional",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Optional"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565202191 -0500 CDT m=+20.100126368",
+      "state": "skipped",
+      "testID": {
+        "id": "performance-isolated-cpu-pool-rt-scheduling-policy",
+        "suite": "performance",
+        "tags": "faredge"
+      }
+    },
    "performance-max-resources-exec-probes": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "No Doc Link - Far Edge",
-        "description": "Checks that less than 10 exec probes are configured in the cluster for this CNF. Also checks that the periodSeconds parameter for each probe is superior or equal to 10.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Reduce the number of exec probes in the cluster for this CNF to less than 10. Increase the update period of the exec probe to be superior or equal to 10 seconds."
-      },
-      "categoryClassification": {
-        "Extended": "Optional",
-        "FarEdge": "Optional",
-        "NonTelco": "Optional",
-        "Telco": "Optional"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565170556 -0500 CDT m=+20.100094732",
-      "state": "skipped",
-      "testID": {
-        "id": "performance-max-resources-exec-probes",
-        "suite": "performance",
-        "tags": "faredge"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "No Doc Link - Far Edge",
+        "description": "Checks that less than 10 exec probes are configured in the cluster for this CNF. Also checks that the periodSeconds parameter for each probe is superior or equal to 10.",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Reduce the number of exec probes in the cluster for this CNF to less than 10. Increase the update period of the exec probe to be superior or equal to 10 seconds."
      },
+      "categoryClassification": {
+        "Extended": "Optional",
+        "FarEdge": "Optional",
+        "NonTelco": "Optional",
+        "Telco": "Optional"
+      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565170556 -0500 CDT m=+20.100094732",
+      "state": "skipped",
+      "testID": {
+        "id": "performance-max-resources-exec-probes",
+        "suite": "performance",
+        "tags": "faredge"
+      }
+    },
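A minimal sketch of an exec probe that satisfies the period requirement above (pod name, image, and health command are hypothetical):

```yaml
# Hypothetical exec probe polling no more often than every 10 seconds.
apiVersion: v1
kind: Pod
metadata:
  name: example-cnf-pod                # hypothetical name
spec:
  containers:
    - name: app
      image: registry.example.com/cnf/app:1.0   # hypothetical image
      livenessProbe:
        exec:
          command: ["/bin/sh", "-c", "pgrep -x app"]   # hypothetical health check
        periodSeconds: 10              # at least 10, as the test requires
```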
    "performance-rt-apps-no-exec-probes": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "No Doc Link - Far Edge",
-        "description": "Ensures that if one container runs a real time application exec probes are not used",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Ensure that if one container runs a real time application exec probes are not used"
-      },
-      "categoryClassification": {
-        "Extended": "Optional",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Optional"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565044986 -0500 CDT m=+20.099969160",
-      "state": "skipped",
-      "testID": {
-        "id": "performance-rt-apps-no-exec-probes",
-        "suite": "performance",
-        "tags": "faredge"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "No Doc Link - Far Edge",
+        "description": "Ensures that if one container runs a real time application exec probes are not used",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Ensure that if one container runs a real time application exec probes are not used"
+      },
+      "categoryClassification": {
+        "Extended": "Optional",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Optional"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565044986 -0500 CDT m=+20.099969160",
+      "state": "skipped",
+      "testID": {
+        "id": "performance-rt-apps-no-exec-probes",
+        "suite": "performance",
+        "tags": "faredge"
+      }
+    },
    "performance-shared-cpu-pool-non-rt-scheduling-policy": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "No Doc Link - Far Edge",
-        "description": "Ensures that if application workload runs in shared CPU pool, it chooses non-RT CPU schedule policy to always share the CPU with other applications and kernel threads.",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Ensure that the workload running in Application shared CPU pool should choose non-RT CPU schedule policy, like SCHED_OTHER to always share the CPU with other applications and kernel threads."
-      },
-      "categoryClassification": {
-        "Extended": "Optional",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Optional"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.56508862 -0500 CDT m=+20.100012795",
-      "state": "skipped",
-      "testID": {
-        "id": "performance-shared-cpu-pool-non-rt-scheduling-policy",
-        "suite": "performance",
-        "tags": "faredge"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "No Doc Link - Far Edge",
+        "description": "Ensures that if application workload runs in shared CPU pool, it chooses non-RT CPU schedule policy to always share the CPU with other applications and kernel threads.",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Ensure that the workload running in Application shared CPU pool should choose non-RT CPU schedule policy, like SCHED_OTHER to always share the CPU with other applications and kernel threads."
+      },
+      "categoryClassification": {
+        "Extended": "Optional",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Optional"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.56508862 -0500 CDT m=+20.100012795",
+      "state": "skipped",
+      "testID": {
+        "id": "performance-shared-cpu-pool-non-rt-scheduling-policy",
+        "suite": "performance",
+        "tags": "faredge"
+      }
+    },
    "platform-alteration-base-image": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-image-standards",
-        "description": "Ensures that the Container Base Image is not altered post-startup. This test is a heuristic, and ensures that there are no changes to the following directories: 1) /var/lib/rpm 2) /var/lib/dpkg 3) /bin 4) /sbin 5) /lib 6) /lib64 7) /usr/bin 8) /usr/sbin 9) /usr/lib 10) /usr/lib64",
-        "exceptionProcess": "No exceptions",
-        "remediation": "Ensure that Container applications do not modify the Container Base Image. In particular, ensure that the following directories are not modified: 1) /var/lib/rpm 2) /var/lib/dpkg 3) /bin 4) /sbin 5) /lib 6) /lib64 7) /usr/bin 8) /usr/sbin 9) /usr/lib 10) /usr/lib64 Ensure that all required binaries are built directly into the container image, and are not installed post startup."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565365098 -0500 CDT m=+20.100289274",
-      "state": "skipped",
-      "testID": {
-        "id": "platform-alteration-base-image",
-        "suite": "platform-alteration",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-image-standards",
+        "description": "Ensures that the Container Base Image is not altered post-startup. This test is a heuristic, and ensures that there are no changes to the following directories: 1) /var/lib/rpm 2) /var/lib/dpkg 3) /bin 4) /sbin 5) /lib 6) /lib64 7) /usr/bin 8) /usr/sbin 9) /usr/lib 10) /usr/lib64",
+        "exceptionProcess": "No exceptions",
+        "remediation": "Ensure that Container applications do not modify the Container Base Image. In particular, ensure that the following directories are not modified: 1) /var/lib/rpm 2) /var/lib/dpkg 3) /bin 4) /sbin 5) /lib 6) /lib64 7) /usr/bin 8) /usr/sbin 9) /usr/lib 10) /usr/lib64 Ensure that all required binaries are built directly into the container image, and are not installed post startup."
      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
+      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565365098 -0500 CDT m=+20.100289274",
+      "state": "skipped",
+      "testID": {
+        "id": "platform-alteration-base-image",
+        "suite": "platform-alteration",
+        "tags": "common"
+      }
+    },
    "platform-alteration-boot-params": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-host-os",
-        "description": "Tests that boot parameters are set through the MachineConfigOperator, and not set manually on the Node.",
-        "exceptionProcess": "No exceptions",
-        "remediation": "Ensure that boot parameters are set directly through the MachineConfigOperator, or indirectly through the PerformanceAddonOperator. Boot parameters should not be changed directly through the Node, as OpenShift should manage the changes for you."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565494647 -0500 CDT m=+20.100418822",
-      "state": "skipped",
-      "testID": {
-        "id": "platform-alteration-boot-params",
-        "suite": "platform-alteration",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-host-os",
+        "description": "Tests that boot parameters are set through the MachineConfigOperator, and not set manually on the Node.",
+        "exceptionProcess": "No exceptions",
+        "remediation": "Ensure that boot parameters are set directly through the MachineConfigOperator, or indirectly through the PerformanceAddonOperator. Boot parameters should not be changed directly through the Node, as OpenShift should manage the changes for you."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565494647 -0500 CDT m=+20.100418822",
+      "state": "skipped",
+      "testID": {
+        "id": "platform-alteration-boot-params",
+        "suite": "platform-alteration",
+        "tags": "common"
+      }
+    },
    "platform-alteration-hugepages-1g-only": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "No Doc Link - Far Edge",
-        "description": "Check that pods using hugepages only use 1Gi size",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Modify pod to consume 1Gi hugepages only"
-      },
-      "categoryClassification": {
-        "Extended": "Optional",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Optional",
-        "Telco": "Optional"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565634975 -0500 CDT m=+20.100559178",
-      "state": "skipped",
-      "testID": {
-        "id": "platform-alteration-hugepages-1g-only",
-        "suite": "platform-alteration",
-        "tags": "faredge"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "No Doc Link - Far Edge",
+        "description": "Check that pods using hugepages only use 1Gi size",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Modify pod to consume 1Gi hugepages only"
+      },
+      "categoryClassification": {
+        "Extended": "Optional",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Optional",
+        "Telco": "Optional"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565634975 -0500 CDT m=+20.100559178",
+      "state": "skipped",
+      "testID": {
+        "id": "platform-alteration-hugepages-1g-only",
+        "suite": "platform-alteration",
+        "tags": "faredge"
+      }
+    },
    "platform-alteration-hugepages-2m-only": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-huge-pages",
-        "description": "Check that pods using hugepages only use 2Mi size",
-        "exceptionProcess": "No exception needed for optional/extended tests.",
-        "remediation": "Modify pod to consume 2Mi hugepages only"
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Optional",
-        "NonTelco": "Optional",
-        "Telco": "Optional"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565610579 -0500 CDT m=+20.100534753",
-      "state": "skipped",
-      "testID": {
-        "id": "platform-alteration-hugepages-2m-only",
-        "suite": "platform-alteration",
-        "tags": "extended"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-huge-pages",
+        "description": "Check that pods using hugepages only use 2Mi size",
+        "exceptionProcess": "No exception needed for optional/extended tests.",
+        "remediation": "Modify pod to consume 2Mi hugepages only"
      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Optional",
+        "NonTelco": "Optional",
+        "Telco": "Optional"
+      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565610579 -0500 CDT m=+20.100534753",
+      "state": "skipped",
+      "testID": {
+        "id": "platform-alteration-hugepages-2m-only",
+        "suite": "platform-alteration",
+        "tags": "extended"
+      }
+    },
    "platform-alteration-hugepages-config": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-huge-pages",
-        "description": "Checks to see that HugePage settings have been configured through MachineConfig, and not manually on the underlying Node. This test case applies only to Nodes that are configured with the \"worker\" MachineConfigSet. First, the \"worker\" MachineConfig is polled, and the Hugepage settings are extracted. Next, the underlying Nodes are polled for configured HugePages through inspection of /proc/meminfo. The results are compared, and the test passes only if they are the same.",
-        "exceptionProcess": "No exceptions",
-        "remediation": "HugePage settings should be configured either directly through the MachineConfigOperator or indirectly using the PerformanceAddonOperator. This ensures that OpenShift is aware of the special MachineConfig requirements, and can provision your CNF on a Node that is part of the corresponding MachineConfigSet. Avoid making changes directly to an underlying Node, and let OpenShift handle the heavy lifting of configuring advanced settings. This test case applies only to Nodes that are configured with the \"worker\" MachineConfigSet."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565475723 -0500 CDT m=+20.100399897",
-      "state": "skipped",
-      "testID": {
-        "id": "platform-alteration-hugepages-config",
-        "suite": "platform-alteration",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-huge-pages",
+        "description": "Checks to see that HugePage settings have been configured through MachineConfig, and not manually on the underlying Node. This test case applies only to Nodes that are configured with the \"worker\" MachineConfigSet. First, the \"worker\" MachineConfig is polled, and the Hugepage settings are extracted. Next, the underlying Nodes are polled for configured HugePages through inspection of /proc/meminfo. The results are compared, and the test passes only if they are the same.",
+        "exceptionProcess": "No exceptions",
+        "remediation": "HugePage settings should be configured either directly through the MachineConfigOperator or indirectly using the PerformanceAddonOperator. This ensures that OpenShift is aware of the special MachineConfig requirements, and can provision your CNF on a Node that is part of the corresponding MachineConfigSet. Avoid making changes directly to an underlying Node, and let OpenShift handle the heavy lifting of configuring advanced settings. This test case applies only to Nodes that are configured with the \"worker\" MachineConfigSet."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565475723 -0500 CDT m=+20.100399897",
+      "state": "skipped",
+      "testID": {
+        "id": "platform-alteration-hugepages-config",
+        "suite": "platform-alteration",
+        "tags": "common"
+      }
+    },
    "platform-alteration-hyperthread-enable": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "No Doc Link - Extended",
-        "description": "Check that baremetal workers have hyperthreading enabled",
-        "exceptionProcess": "There is no documented exception process for this.",
-        "remediation": "Check that baremetal workers have hyperthreading enabled"
-      },
-      "categoryClassification": {
-        "Extended": "Optional",
-        "FarEdge": "Optional",
-        "NonTelco": "Optional",
-        "Telco": "Optional"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565325812 -0500 CDT m=+20.100249986",
-      "state": "skipped",
-      "testID": {
-        "id": "platform-alteration-hyperthread-enable",
-        "suite": "platform-alteration",
-        "tags": "extended"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "No Doc Link - Extended",
+        "description": "Check that baremetal workers have hyperthreading enabled",
+        "exceptionProcess": "There is no documented exception process for this.",
+        "remediation": "Check that baremetal workers have hyperthreading enabled"
+      },
+      "categoryClassification": {
+        "Extended": "Optional",
+        "FarEdge": "Optional",
+        "NonTelco": "Optional",
+        "Telco": "Optional"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565325812 -0500 CDT m=+20.100249986",
+      "state": "skipped",
+      "testID": {
+        "id": "platform-alteration-hyperthread-enable",
+        "suite": "platform-alteration",
+        "tags": "extended"
+      }
+    },
    "platform-alteration-is-selinux-enforcing": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-security",
-        "description": "verifies that all openshift platform/cluster nodes have selinux in \"Enforcing\" mode.",
-        "exceptionProcess": "No exceptions",
-        "remediation": "Configure selinux and enable enforcing mode."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565457298 -0500 CDT m=+20.100381472",
-      "state": "skipped",
-      "testID": {
-        "id": "platform-alteration-is-selinux-enforcing",
-        "suite": "platform-alteration",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-security",
+        "description": "verifies that all openshift platform/cluster nodes have selinux in \"Enforcing\" mode.",
+        "exceptionProcess": "No exceptions",
+        "remediation": "Configure selinux and enable enforcing mode."
      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
+      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565457298 -0500 CDT m=+20.100381472",
+      "state": "skipped",
+      "testID": {
+        "id": "platform-alteration-is-selinux-enforcing",
+        "suite": "platform-alteration",
+        "tags": "common"
+      }
+    },
    "platform-alteration-isredhat-release": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-base-images",
-        "description": "verifies if the container base image is redhat.",
-        "exceptionProcess": "No exceptions",
-        "remediation": "Build a new container image that is based on UBI (Red Hat Universal Base Image)."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565416803 -0500 CDT m=+20.100340978",
-      "state": "skipped",
-      "testID": {
-        "id": "platform-alteration-isredhat-release",
-        "suite": "platform-alteration",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-base-images",
+        "description": "verifies if the container base image is redhat.",
+        "exceptionProcess": "No exceptions",
+        "remediation": "Build a new container image that is based on UBI (Red Hat Universal Base Image)."
+      },
+      "categoryClassification": {
+        "Extended": "Mandatory",
+        "FarEdge": "Mandatory",
+        "NonTelco": "Mandatory",
+        "Telco": "Mandatory"
      },
+      "duration": 0,
+      "endTime": "0001-01-01 00:00:00 +0000 UTC",
+      "failureLineContent": "",
+      "failureLocation": ":0",
+      "skipReason": "",
+      "startTime": "2023-09-04 09:18:50.565416803 -0500 CDT m=+20.100340978",
+      "state": "skipped",
+      "testID": {
+        "id": "platform-alteration-isredhat-release",
+        "suite": "platform-alteration",
+        "tags": "common"
+      }
+    },
    "platform-alteration-ocp-lifecycle": {
-      "capturedTestOutput": "",
-      "catalogInfo": {
-        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-k8s",
-        "description": "Tests that the running OCP version is not end of life.",
-        "exceptionProcess": "No exceptions",
-        "remediation": "Please update your cluster to a version that is generally available."
-      },
-      "categoryClassification": {
-        "Extended": "Mandatory",
-        "FarEdge": "Mandatory",
-        "NonTelco": "Mandatory",
-        "Telco": "Mandatory"
-      },
-      "duration": 0,
-      "endTime": "0001-01-01 00:00:00 +0000 UTC",
-      "failureLineContent": "",
-      "failureLocation": ":0",
-      "skipReason": "",
-      "startTime": "2023-09-04 09:18:50.565570329 -0500 CDT m=+20.100494505",
-      "state": "skipped",
-      "testID": {
-        "id": "platform-alteration-ocp-lifecycle",
-        "suite": "platform-alteration",
-        "tags": "common"
-      }
+      "capturedTestOutput": "",
+      "catalogInfo": {
+        "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-k8s",
+        "description": "Tests that the running OCP version is not end of life.",
+        "exceptionProcess": "No exceptions",
+        "remediation": "Please update your cluster to a version that is generally available."
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.565570329 -0500 CDT m=+20.100494505", + "state": "skipped", + "testID": { + "id": "platform-alteration-ocp-lifecycle", + "suite": "platform-alteration", + "tags": "common" + } + }, "platform-alteration-ocp-node-os-lifecycle": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-host-os", - "description": "Tests that the nodes running in the cluster have operating systems that are compatible with the deployed version of OpenShift.", - "exceptionProcess": "No exceptions", - "remediation": "Please update your workers to a version that is supported by your version of OpenShift" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.565588426 -0500 CDT m=+20.100512601", - "state": "skipped", - "testID": { - "id": "platform-alteration-ocp-node-os-lifecycle", - "suite": "platform-alteration", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-host-os", + "description": "Tests that the nodes running in the cluster have operating systems that are compatible with the deployed version of OpenShift.", + "exceptionProcess": "No exceptions", + "remediation": "Please update your workers to a version that is supported by your version of OpenShift" }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" + }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.565588426 -0500 CDT m=+20.100512601", + "state": "skipped", + "testID": { + "id": "platform-alteration-ocp-node-os-lifecycle", + "suite": "platform-alteration", + "tags": "common" + } + }, "platform-alteration-service-mesh-usage": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "No Doc Link - Extended", - "description": "Checks if the istio namespace (\"istio-system\") is present. If it is present, checks that the istio sidecar is present in all pods under test.", - "exceptionProcess": "No exception needed for optional/extended tests.", - "remediation": "Ensure all the CNF pods are using service mesh if the cluster provides it." 
- }, - "categoryClassification": { - "Extended": "Optional", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.565553571 -0500 CDT m=+20.100477761", - "state": "skipped", - "testID": { - "id": "platform-alteration-service-mesh-usage", - "suite": "platform-alteration", - "tags": "extended" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "No Doc Link - Extended", + "description": "Checks if the istio namespace (\"istio-system\") is present. If it is present, checks that the istio sidecar is present in all pods under test.", + "exceptionProcess": "No exception needed for optional/extended tests.", + "remediation": "Ensure all the CNF pods are using service mesh if the cluster provides it." + }, + "categoryClassification": { + "Extended": "Optional", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.565553571 -0500 CDT m=+20.100477761", + "state": "skipped", + "testID": { + "id": "platform-alteration-service-mesh-usage", + "suite": "platform-alteration", + "tags": "extended" + } + }, "platform-alteration-sysctl-config": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", - "description": "Tests that no one has changed the node's sysctl configs after the node was created, the tests works by checking if the sysctl configs are consistent with the MachineConfig CR which defines how the node should be configured", - "exceptionProcess": "No exceptions", - "remediation": "You should recreate the node or change the sysctls, recreating is recommended because there might be other unknown changes" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.565534689 -0500 CDT m=+20.100458864", - "state": "skipped", - "testID": { - "id": "platform-alteration-sysctl-config", - "suite": "platform-alteration", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", + "description": "Tests that no one has changed the node's sysctl configs after the node was created, the tests works by checking if the sysctl configs are consistent with the MachineConfig CR which defines how the node should be configured", + "exceptionProcess": "No exceptions", + "remediation": "You should recreate the node or change the sysctls, recreating is recommended because there might be other unknown changes" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.565534689 -0500 CDT m=+20.100458864", + "state": "skipped", + "testID": { + "id": 
"platform-alteration-sysctl-config", + "suite": "platform-alteration", + "tags": "common" + } + }, "platform-alteration-tainted-node-kernel": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", - "description": "Ensures that the Node(s) hosting CNFs do not utilize tainted kernels. This test case is especially important to support Highly Available CNFs, since when a CNF is re-instantiated on a backup Node, that Node's kernel may not have the same hacks.'", - "exceptionProcess": "If taint is necessary, document details of the taint and why it's needed by workload or environment.", - "remediation": "Test failure indicates that the underlying Node's kernel is tainted. Ensure that you have not altered underlying Node(s) kernels in order to run the CNF." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:50.565396496 -0500 CDT m=+20.100320669", - "state": "skipped", - "testID": { - "id": "platform-alteration-tainted-node-kernel", - "suite": "platform-alteration", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", + "description": "Ensures that the Node(s) hosting CNFs do not utilize tainted kernels. This test case is especially important to support Highly Available CNFs, since when a CNF is re-instantiated on a backup Node, that Node's kernel may not have the same hacks.'", + "exceptionProcess": "If taint is necessary, document details of the taint and why it's needed by workload or environment.", + "remediation": "Test failure indicates that the underlying Node's kernel is tainted. Ensure that you have not altered underlying Node(s) kernels in order to run the CNF." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" + }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:50.565396496 -0500 CDT m=+20.100320669", + "state": "skipped", + "testID": { + "id": "platform-alteration-tainted-node-kernel", + "suite": "platform-alteration", + "tags": "common" } + } }, "versions": { "claimFormat": "v0.1.0", diff --git a/cmd/certsuite/claim/compare/testdata/claim_observability.json b/cmd/certsuite/claim/compare/testdata/claim_observability.json index 8ac8cfefa..09dad95d4 100644 --- a/cmd/certsuite/claim/compare/testdata/claim_observability.json +++ b/cmd/certsuite/claim/compare/testdata/claim_observability.json @@ -1,7 +1,9 @@ { "claim": { - "configurations" : { - "AbnormalEvents" : ["EVENT1"], + "configurations": { + "AbnormalEvents": [ + "EVENT1" + ], "Config": { "acceptedKernelTaints": [ { @@ -8276,2438 +8278,2435 @@ }, "results": { "access-control-bpf-capability-check": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "No Doc Link - Telco", - "description": "Ensures that containers do not use BFP capability. CNF should avoid loading eBPF filters", - "exceptionProcess": "Exception can be considered. 
Must identify which container requires the capability and detail why.", - "remediation": "Remove the following capability from the container/pod definitions: BPF" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856782154 -0500 CDT m=+20.598793933", - "state": "skipped", - "testID": { - "id": "access-control-bpf-capability-check", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "No Doc Link - Telco", + "description": "Ensures that containers do not use BFP capability. CNF should avoid loading eBPF filters", + "exceptionProcess": "Exception can be considered. Must identify which container requires the capability and detail why.", + "remediation": "Remove the following capability from the container/pod definitions: BPF" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856782154 -0500 CDT m=+20.598793933", + "state": "skipped", + "testID": { + "id": "access-control-bpf-capability-check", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-cluster-role-bindings": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-security-rbac", - "description": "Tests that a Pod does not specify ClusterRoleBindings.", - "exceptionProcess": "Exception possible only for workloads that's cluster wide in nature and absolutely needs cluster level roles \u0026 role bindings", - "remediation": "In most cases, Pod's should not have ClusterRoleBindings. The suggested remediation is to remove the need for ClusterRoleBindings, if possible. Cluster roles and cluster role bindings discouraged unless absolutely needed by CNF (often reserved for cluster admin only)." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856995843 -0500 CDT m=+20.599007627", - "state": "skipped", - "testID": { - "id": "access-control-cluster-role-bindings", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-security-rbac", + "description": "Tests that a Pod does not specify ClusterRoleBindings.", + "exceptionProcess": "Exception possible only for workloads that's cluster wide in nature and absolutely needs cluster level roles \u0026 role bindings", + "remediation": "In most cases, Pod's should not have ClusterRoleBindings. The suggested remediation is to remove the need for ClusterRoleBindings, if possible. Cluster roles and cluster role bindings discouraged unless absolutely needed by CNF (often reserved for cluster admin only)." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856995843 -0500 CDT m=+20.599007627", + "state": "skipped", + "testID": { + "id": "access-control-cluster-role-bindings", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-container-host-port": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-accessing-resource-on-host", - "description": "Verifies if containers define a hostPort.", - "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", - "remediation": "Remove hostPort configuration from the container. CNF should avoid accessing host resources - containers should not configure HostPort." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856849398 -0500 CDT m=+20.598861160", - "state": "skipped", - "testID": { - "id": "access-control-container-host-port", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-accessing-resource-on-host", + "description": "Verifies if containers define a hostPort.", + "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", + "remediation": "Remove hostPort configuration from the container. CNF should avoid accessing host resources - containers should not configure HostPort." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856849398 -0500 CDT m=+20.598861160", + "state": "skipped", + "testID": { + "id": "access-control-container-host-port", + "suite": "access-control", + "tags": "common" + } + }, "access-control-crd-roles": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-custom-role-to-access-application-crds", - "description": "If an application creates CRDs it must supply a role to access those CRDs and no other API resources/permission. This test checks that there is at least one role present in each namespaces under test that only refers to CRDs under test.", - "exceptionProcess": "No exception needed for optional/extended tests.", - "remediation": "Roles providing access to CRDs should not refer to any other api or resources. 
Change the generation of the CRD role accordingly" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857245261 -0500 CDT m=+20.599257050", - "state": "skipped", - "testID": { - "id": "access-control-crd-roles", - "suite": "access-control", - "tags": "extended" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-custom-role-to-access-application-crds", + "description": "If an application creates CRDs it must supply a role to access those CRDs and no other API resources/permission. This test checks that there is at least one role present in each namespaces under test that only refers to CRDs under test.", + "exceptionProcess": "No exception needed for optional/extended tests.", + "remediation": "Roles providing access to CRDs should not refer to any other api or resources. Change the generation of the CRD role accordingly" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857245261 -0500 CDT m=+20.599257050", + "state": "skipped", + "testID": { + "id": "access-control-crd-roles", + "suite": "access-control", + "tags": "extended" + } + }, "access-control-ipc-lock-capability-check": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipc_lock", - "description": "Ensures that containers do not use IPC_LOCK capability. CNF should avoid accessing host resources - spec.HostIpc should be false.", - "exceptionProcess": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why.", - "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856764422 -0500 CDT m=+20.598776182", - "state": "skipped", - "testID": { - "id": "access-control-ipc-lock-capability-check", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipc_lock", + "description": "Ensures that containers do not use IPC_LOCK capability. CNF should avoid accessing host resources - spec.HostIpc should be false.", + "exceptionProcess": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. 
Must identify which container requires the capability and detail why.", + "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856764422 -0500 CDT m=+20.598776182", + "state": "skipped", + "testID": { + "id": "access-control-ipc-lock-capability-check", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-namespace": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs", - "description": "Tests that all CNF's resources (PUTs and CRs) belong to valid namespaces. A valid namespace meets\nthe following conditions: (1) It was declared in the yaml config file under the targetNameSpaces\ntag. (2) It does not have any of the following prefixes: default, openshift-, istio- and aspenmesh-", - "exceptionProcess": "No exceptions", - "remediation": "Ensure that your CNF utilizes namespaces declared in the yaml config file. Additionally, the namespaces should not start with \"default, openshift-, istio- or aspenmesh-\"." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856942295 -0500 CDT m=+20.598954056", - "state": "skipped", - "testID": { - "id": "access-control-namespace", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs", + "description": "Tests that all CNF's resources (PUTs and CRs) belong to valid namespaces. A valid namespace meets\nthe following conditions: (1) It was declared in the yaml config file under the targetNameSpaces\ntag. (2) It does not have any of the following prefixes: default, openshift-, istio- and aspenmesh-", + "exceptionProcess": "No exceptions", + "remediation": "Ensure that your CNF utilizes namespaces declared in the yaml config file. Additionally, the namespaces should not start with \"default, openshift-, istio- or aspenmesh-\"." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856942295 -0500 CDT m=+20.598954056", + "state": "skipped", + "testID": { + "id": "access-control-namespace", + "suite": "access-control", + "tags": "common" + } + }, "access-control-namespace-resource-quota": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-memory-allocation", - "description": "Checks to see if CNF workload pods are running in namespaces that have resource quotas applied.", - "exceptionProcess": "No exception needed for optional/extended tests.", - "remediation": "Apply a ResourceQuota to the namespace your CNF is running in. The CNF namespace should have resource quota defined." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857087759 -0500 CDT m=+20.599099530", - "state": "skipped", - "testID": { - "id": "access-control-namespace-resource-quota", - "suite": "access-control", - "tags": "extended" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-memory-allocation", + "description": "Checks to see if CNF workload pods are running in namespaces that have resource quotas applied.", + "exceptionProcess": "No exception needed for optional/extended tests.", + "remediation": "Apply a ResourceQuota to the namespace your CNF is running in. The CNF namespace should have resource quota defined." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857087759 -0500 CDT m=+20.599099530", + "state": "skipped", + "testID": { + "id": "access-control-namespace-resource-quota", + "suite": "access-control", + "tags": "extended" + } + }, "access-control-net-admin-capability-check": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-net_admin", - "description": "Ensures that containers do not use NET_ADMIN capability. Note: this test also ensures iptables and nftables are not configured by CNF pods:\n- NET_ADMIN and NET_RAW are required to modify nftables (namespaced) which is not desired inside pods.\nnftables should be configured by an administrator outside the scope of the CNF. nftables are usually configured\nby operators, for instance the Performance Addon Operator (PAO) or istio.\n- Privileged container are required to modify host iptables, which is not safe to perform inside pods. nftables\nshould be configured by an administrator outside the scope of the CNF. iptables are usually configured by operators,\nfor instance the Performance Addon Operator (PAO) or istio.", - "exceptionProcess": "Exception will be considered for user plane or networking functions (e.g. 
SR-IOV, Multicast). Must identify which container requires the capability and detail why.", - "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856720539 -0500 CDT m=+20.598732310", - "state": "skipped", - "testID": { - "id": "access-control-net-admin-capability-check", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-net_admin", + "description": "Ensures that containers do not use NET_ADMIN capability. Note: this test also ensures iptables and nftables are not configured by CNF pods:\n- NET_ADMIN and NET_RAW are required to modify nftables (namespaced) which is not desired inside pods.\nnftables should be configured by an administrator outside the scope of the CNF. nftables are usually configured\nby operators, for instance the Performance Addon Operator (PAO) or istio.\n- Privileged container are required to modify host iptables, which is not safe to perform inside pods. nftables\nshould be configured by an administrator outside the scope of the CNF. iptables are usually configured by operators,\nfor instance the Performance Addon Operator (PAO) or istio.", + "exceptionProcess": "Exception will be considered for user plane or networking functions (e.g. SR-IOV, Multicast). Must identify which container requires the capability and detail why.", + "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856720539 -0500 CDT m=+20.598732310", + "state": "skipped", + "testID": { + "id": "access-control-net-admin-capability-check", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-net-raw-capability-check": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-user-plane-cnfs", - "description": "Ensures that containers do not use NET_RAW capability. Note: this test also ensures iptables and nftables are not configured by CNF pods:\n- NET_ADMIN and NET_RAW are required to modify nftables (namespaced) which is not desired inside pods.\nnftables should be configured by an administrator outside the scope of the CNF. nftables are usually configured\nby operators, for instance the Performance Addon Operator (PAO) or istio.\n- Privileged container are required to modify host iptables, which is not safe to perform inside pods. nftables\nshould be configured by an administrator outside the scope of the CNF. 
iptables are usually configured by operators,\nfor instance the Performance Addon Operator (PAO) or istio.", - "exceptionProcess": "Exception will be considered for user plane or networking functions. Must identify which container requires the capability and detail why.", - "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856743281 -0500 CDT m=+20.598755053", - "state": "skipped", - "testID": { - "id": "access-control-net-raw-capability-check", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-user-plane-cnfs", + "description": "Ensures that containers do not use NET_RAW capability. Note: this test also ensures iptables and nftables are not configured by CNF pods:\n- NET_ADMIN and NET_RAW are required to modify nftables (namespaced) which is not desired inside pods.\nnftables should be configured by an administrator outside the scope of the CNF. nftables are usually configured\nby operators, for instance the Performance Addon Operator (PAO) or istio.\n- Privileged container are required to modify host iptables, which is not safe to perform inside pods. nftables\nshould be configured by an administrator outside the scope of the CNF. iptables are usually configured by operators,\nfor instance the Performance Addon Operator (PAO) or istio.", + "exceptionProcess": "Exception will be considered for user plane or networking functions. Must identify which container requires the capability and detail why.", + "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856743281 -0500 CDT m=+20.598755053", + "state": "skipped", + "testID": { + "id": "access-control-net-raw-capability-check", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-no-1337-uid": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "No Doc Link - Extended", - "description": "Checks that all pods are not using the securityContext UID 1337", - "exceptionProcess": "No exception needed for optional/extended tests.", - "remediation": "Use another process UID that is not 1337." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857137597 -0500 CDT m=+20.599149382", - "state": "skipped", - "testID": { - "id": "access-control-no-1337-uid", - "suite": "access-control", - "tags": "extended" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "No Doc Link - Extended", + "description": "Checks that all pods are not using the securityContext UID 1337", + "exceptionProcess": "No exception needed for optional/extended tests.", + "remediation": "Use another process UID that is not 1337." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857137597 -0500 CDT m=+20.599149382", + "state": "skipped", + "testID": { + "id": "access-control-no-1337-uid", + "suite": "access-control", + "tags": "extended" + } + }, "access-control-one-process-per-container": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-one-process-per-container", - "description": "Check that all containers under test have only one process running", - "exceptionProcess": "No exception needed for optional/extended tests. Not applicable to SNO applications.", - "remediation": "Launch only one process per container. Should adhere to 1 process per container best practice wherever possible." - }, - "categoryClassification": { - "Extended": "Optional", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857029418 -0500 CDT m=+20.599041193", - "state": "skipped", - "testID": { - "id": "access-control-one-process-per-container", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-one-process-per-container", + "description": "Check that all containers under test have only one process running", + "exceptionProcess": "No exception needed for optional/extended tests. Not applicable to SNO applications.", + "remediation": "Launch only one process per container. Should adhere to 1 process per container best practice wherever possible." 
+ }, + "categoryClassification": { + "Extended": "Optional", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857029418 -0500 CDT m=+20.599041193", + "state": "skipped", + "testID": { + "id": "access-control-one-process-per-container", + "suite": "access-control", + "tags": "common" + } + }, "access-control-pod-automount-service-account-token": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-automount-services-for-pods", - "description": "Check that all pods under test have automountServiceAccountToken set to false. Only pods that require access to the kubernetes API server should have automountServiceAccountToken set to true", - "exceptionProcess": "Exception will be considered if container needs to access APIs which OCP does not offer natively. Must document which container requires which API(s) and detail why existing OCP APIs cannot be used.", - "remediation": "Check that pod has automountServiceAccountToken set to false or pod is attached to service account which has automountServiceAccountToken set to false, unless the pod needs access to the kubernetes API server. Pods which do not need API access should set automountServiceAccountToken to false in pod spec." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857012338 -0500 CDT m=+20.599024109", - "state": "skipped", - "testID": { - "id": "access-control-pod-automount-service-account-token", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-automount-services-for-pods", + "description": "Check that all pods under test have automountServiceAccountToken set to false. Only pods that require access to the kubernetes API server should have automountServiceAccountToken set to true", + "exceptionProcess": "Exception will be considered if container needs to access APIs which OCP does not offer natively. Must document which container requires which API(s) and detail why existing OCP APIs cannot be used.", + "remediation": "Check that pod has automountServiceAccountToken set to false or pod is attached to service account which has automountServiceAccountToken set to false, unless the pod needs access to the kubernetes API server. Pods which do not need API access should set automountServiceAccountToken to false in pod spec." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857012338 -0500 CDT m=+20.599024109", + "state": "skipped", + "testID": { + "id": "access-control-pod-automount-service-account-token", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-pod-host-ipc": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", - "description": "Verifies that the spec.HostIpc parameter is set to false", - "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", - "remediation": "Set the spec.HostIpc parameter to false in the pod configuration. CNF should avoid accessing host resources - spec.HostIpc should be false." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856904859 -0500 CDT m=+20.598916631", - "state": "skipped", - "testID": { - "id": "access-control-pod-host-ipc", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", + "description": "Verifies that the spec.HostIpc parameter is set to false", + "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", + "remediation": "Set the spec.HostIpc parameter to false in the pod configuration. CNF should avoid accessing host resources - spec.HostIpc should be false." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856904859 -0500 CDT m=+20.598916631", + "state": "skipped", + "testID": { + "id": "access-control-pod-host-ipc", + "suite": "access-control", + "tags": "common" + } + }, "access-control-pod-host-network": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-the-host-network-namespace", - "description": "Verifies that the spec.HostNetwork parameter is not set (not present)", - "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", - "remediation": "Set the spec.HostNetwork parameter to false in the pod configuration. CNF should avoid accessing host resources - spec.HostNetwork should be false." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856868532 -0500 CDT m=+20.598880316", - "state": "skipped", - "testID": { - "id": "access-control-pod-host-network", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-the-host-network-namespace", + "description": "Verifies that the spec.HostNetwork parameter is not set (not present)", + "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", + "remediation": "Set the spec.HostNetwork parameter to false in the pod configuration. CNF should avoid accessing host resources - spec.HostNetwork should be false." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856868532 -0500 CDT m=+20.598880316", + "state": "skipped", + "testID": { + "id": "access-control-pod-host-network", + "suite": "access-control", + "tags": "common" + } + }, "access-control-pod-host-path": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", - "description": "Verifies that the spec.HostPath parameter is not set (not present)", - "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", - "remediation": "Set the spec.HostPath parameter to false in the pod configuration. CNF should avoid accessing host resources - spec.HostPath should be false." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856886834 -0500 CDT m=+20.598898596", - "state": "skipped", - "testID": { - "id": "access-control-pod-host-path", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", + "description": "Verifies that the spec.HostPath parameter is not set (not present)", + "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", + "remediation": "Set the spec.HostPath parameter to false in the pod configuration. CNF should avoid accessing host resources - spec.HostPath should be false." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856886834 -0500 CDT m=+20.598898596", + "state": "skipped", + "testID": { + "id": "access-control-pod-host-path", + "suite": "access-control", + "tags": "common" + } + }, "access-control-pod-host-pid": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", - "description": "Verifies that the spec.HostPid parameter is set to false", - "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", - "remediation": "Set the spec.HostPid parameter to false in the pod configuration. CNF should avoid accessing host resources - spec.HostPid should be false." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856922953 -0500 CDT m=+20.598934724", - "state": "skipped", - "testID": { - "id": "access-control-pod-host-pid", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", + "description": "Verifies that the spec.HostPid parameter is set to false", + "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", + "remediation": "Set the spec.HostPid parameter to false in the pod configuration. CNF should avoid accessing host resources - spec.HostPid should be false." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856922953 -0500 CDT m=+20.598934724", + "state": "skipped", + "testID": { + "id": "access-control-pod-host-pid", + "suite": "access-control", + "tags": "common" + } + }, "access-control-pod-role-bindings": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-security-rbac", - "description": "Ensures that a CNF does not utilize RoleBinding(s) in a non-CNF Namespace.", - "exceptionProcess": "No exceptions", - "remediation": "Ensure the CNF is not configured to use RoleBinding(s) in a non-CNF Namespace. Scope of role must \u003c= scope of creator of role." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856974883 -0500 CDT m=+20.598986669", - "state": "skipped", - "testID": { - "id": "access-control-pod-role-bindings", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-security-rbac", + "description": "Ensures that a CNF does not utilize RoleBinding(s) in a non-CNF Namespace.", + "exceptionProcess": "No exceptions", + "remediation": "Ensure the CNF is not configured to use RoleBinding(s) in a non-CNF Namespace. Scope of role must \u003c= scope of creator of role." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856974883 -0500 CDT m=+20.598986669", + "state": "skipped", + "testID": { + "id": "access-control-pod-role-bindings", + "suite": "access-control", + "tags": "common" + } + }, "access-control-pod-service-account": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-scc-permissions-for-an-application", - "description": "Tests that each CNF Pod utilizes a valid Service Account. Default or empty service account is not valid.", - "exceptionProcess": "No exceptions", - "remediation": "Ensure that the each CNF Pod is configured to use a valid Service Account" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856958713 -0500 CDT m=+20.598970483", - "state": "skipped", - "testID": { - "id": "access-control-pod-service-account", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-scc-permissions-for-an-application", + "description": "Tests that each CNF Pod utilizes a valid Service Account. 
Default or empty service account is not valid.", + "exceptionProcess": "No exceptions", + "remediation": "Ensure that the each CNF Pod is configured to use a valid Service Account" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856958713 -0500 CDT m=+20.598970483", + "state": "skipped", + "testID": { + "id": "access-control-pod-service-account", + "suite": "access-control", + "tags": "common" + } + }, "access-control-projected-volume-service-account-token": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-automount-services-for-pods", - "description": "Checks that pods do not use projected volumes and service account tokens", - "exceptionProcess": "Exception will be considered if container needs to access APIs which OCP does not offer natively. Must document which container requires which API(s) and detail why existing OCP APIs cannot be used.", - "remediation": "Ensure that pods do not use projected volumes and service account tokens" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857161872 -0500 CDT m=+20.599173643", - "state": "skipped", - "testID": { - "id": "access-control-projected-volume-service-account-token", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-automount-services-for-pods", + "description": "Checks that pods do not use projected volumes and service account tokens", + "exceptionProcess": "Exception will be considered if container needs to access APIs which OCP does not offer natively. Must document which container requires which API(s) and detail why existing OCP APIs cannot be used.", + "remediation": "Ensure that pods do not use projected volumes and service account tokens" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857161872 -0500 CDT m=+20.599173643", + "state": "skipped", + "testID": { + "id": "access-control-projected-volume-service-account-token", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-requests-and-limits": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requests/limits", - "description": "Check that containers have resource requests and limits specified in their spec.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Add requests and limits to your container spec. 
See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857121342 -0500 CDT m=+20.599133128", - "state": "skipped", - "testID": { - "id": "access-control-requests-and-limits", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requests/limits", + "description": "Check that containers have resource requests and limits specified in their spec.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Add requests and limits to your container spec. See: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857121342 -0500 CDT m=+20.599133128", + "state": "skipped", + "testID": { + "id": "access-control-requests-and-limits", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-security-context": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", - "description": "Checks the security context matches one of the 4 categories", - "exceptionProcess": "no exception needed for optional/extended test", - "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and document why. If the container had the right configuration of the allowed category from the 4 approved list then the test will pass. The 4 categories are defined in Requirement ID 94118 of the Extended Best Practices guide (private repo)" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856684241 -0500 CDT m=+20.598696026", - "state": "skipped", - "testID": { - "id": "access-control-security-context", - "suite": "access-control", - "tags": "extended" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", + "description": "Checks the security context matches one of the 4 categories", + "exceptionProcess": "no exception needed for optional/extended test", + "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and document why. If the container had the right configuration of the allowed category from the 4 approved list then the test will pass. 
The 4 categories are defined in Requirement ID 94118 of the Extended Best Practices guide (private repo)" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856684241 -0500 CDT m=+20.598696026", + "state": "skipped", + "testID": { + "id": "access-control-security-context", + "suite": "access-control", + "tags": "extended" + } + }, "access-control-security-context-non-root-user-check": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", - "description": "Checks the security context runAsUser parameter in pods and containers to make sure it is not set to uid root(0). Pods and containers should not run as root (runAsUser is not set to uid0).", - "exceptionProcess": "No exceptions - will only be considered under special circumstances. Must identify which container needs access and document why with details.", - "remediation": "Change the pod and containers \"runAsUser\" uid to something other than root(0)" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856807893 -0500 CDT m=+20.598819681", - "state": "skipped", - "testID": { - "id": "access-control-security-context-non-root-user-check", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", + "description": "Checks the security context runAsUser parameter in pods and containers to make sure it is not set to uid root(0). Pods and containers should not run as root (runAsUser is not set to uid0).", + "exceptionProcess": "No exceptions - will only be considered under special circumstances. Must identify which container needs access and document why with details.", + "remediation": "Change the pod and containers \"runAsUser\" uid to something other than root(0)" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856807893 -0500 CDT m=+20.598819681", + "state": "skipped", + "testID": { + "id": "access-control-security-context-non-root-user-check", + "suite": "access-control", + "tags": "common" + } + }, "access-control-security-context-privilege-escalation": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", - "description": "Checks if privileged escalation is enabled (AllowPrivilegeEscalation=true).", - "exceptionProcess": "No exceptions", - "remediation": "Configure privilege escalation to false. Privileged escalation should not be allowed (AllowPrivilegeEscalation=false)." 
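For reference, a minimal Pod manifest satisfying the access-control-security-context-privilege-escalation remediation above could look like the following sketch (the name and image are illustrative placeholders, not values from the suite):

```json
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": { "name": "example-pod" },
  "spec": {
    "containers": [
      {
        "name": "app",
        "image": "registry.example.com/app:1.0",
        "securityContext": { "allowPrivilegeEscalation": false }
      }
    ]
  }
}
```

Setting the field explicitly to false is safer than omitting it, since the effective default is typically true.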
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856831849 -0500 CDT m=+20.598843609", - "state": "skipped", - "testID": { - "id": "access-control-security-context-privilege-escalation", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", + "description": "Checks if privileged escalation is enabled (AllowPrivilegeEscalation=true).", + "exceptionProcess": "No exceptions", + "remediation": "Configure privilege escalation to false. Privileged escalation should not be allowed (AllowPrivilegeEscalation=false)." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856831849 -0500 CDT m=+20.598843609", + "state": "skipped", + "testID": { + "id": "access-control-security-context-privilege-escalation", + "suite": "access-control", + "tags": "common" + } + }, "access-control-service-type": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-the-host-network-namespace", - "description": "Tests that each CNF Service does not utilize NodePort(s).", - "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", - "remediation": "Ensure Services are not configured to use NodePort(s).CNF should avoid accessing host resources - tests that each CNF Service does not utilize NodePort(s)." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857178269 -0500 CDT m=+20.599190028", - "state": "skipped", - "testID": { - "id": "access-control-service-type", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-the-host-network-namespace", + "description": "Tests that each CNF Service does not utilize NodePort(s).", + "exceptionProcess": "Exception for host resource access tests will only be considered in rare cases where it is absolutely needed", + "remediation": "Ensure Services are not configured to use NodePort(s).CNF should avoid accessing host resources - tests that each CNF Service does not utilize NodePort(s)." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857178269 -0500 CDT m=+20.599190028", + "state": "skipped", + "testID": { + "id": "access-control-service-type", + "suite": "access-control", + "tags": "common" + } + }, "access-control-ssh-daemons": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-interaction/configuration", - "description": "Check that pods do not run SSH daemons.", - "exceptionProcess": "No exceptions - special consideration can be given to certain containers which run as utility tool daemon", - "remediation": "Ensure that no SSH daemons are running inside a pod. Pods should not run as SSH Daemons (replicaset or statefulset only)." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857103844 -0500 CDT m=+20.599115614", - "state": "skipped", - "testID": { - "id": "access-control-ssh-daemons", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-interaction/configuration", + "description": "Check that pods do not run SSH daemons.", + "exceptionProcess": "No exceptions - special consideration can be given to certain containers which run as utility tool daemon", + "remediation": "Ensure that no SSH daemons are running inside a pod. Pods should not run as SSH Daemons (replicaset or statefulset only)." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857103844 -0500 CDT m=+20.599115614", + "state": "skipped", + "testID": { + "id": "access-control-ssh-daemons", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-sys-admin-capability-check": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-sys_admin", - "description": "Ensures that containers do not use SYS_ADMIN capability", - "exceptionProcess": "No exceptions", - "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why. Containers should not use the SYS_ADMIN Linux capability." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856700229 -0500 CDT m=+20.598711990", - "state": "skipped", - "testID": { - "id": "access-control-sys-admin-capability-check", - "suite": "access-control", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-avoid-sys_admin", + "description": "Ensures that containers do not use SYS_ADMIN capability", + "exceptionProcess": "No exceptions", + "remediation": "Exception possible if CNF uses mlock(), mlockall(), shmctl(), mmap(); exception will be considered for DPDK applications. Must identify which container requires the capability and detail why. Containers should not use the SYS_ADMIN Linux capability." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856700229 -0500 CDT m=+20.598711990", + "state": "skipped", + "testID": { + "id": "access-control-sys-admin-capability-check", + "suite": "access-control", + "tags": "common" + } + }, "access-control-sys-nice-realtime-capability": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-sys_nice", - "description": "Check that pods running on nodes with realtime kernel enabled have the SYS_NICE capability enabled in their spec. In the case that a CNF is running on a node using the real-time kernel, SYS_NICE will be used to allow DPDK application to switch to SCHED_FIFO.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "If pods are scheduled to realtime kernel nodes, they must add SYS_NICE capability to their spec." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857046186 -0500 CDT m=+20.599057972", - "state": "skipped", - "testID": { - "id": "access-control-sys-nice-realtime-capability", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-sys_nice", + "description": "Check that pods running on nodes with realtime kernel enabled have the SYS_NICE capability enabled in their spec. In the case that a CNF is running on a node using the real-time kernel, SYS_NICE will be used to allow DPDK application to switch to SCHED_FIFO.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "If pods are scheduled to realtime kernel nodes, they must add SYS_NICE capability to their spec." 
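Adding SYS_NICE as the access-control-sys-nice-realtime-capability remediation describes is a one-line change to each container's securityContext; the relevant fragment:

```json
{
  "securityContext": {
    "capabilities": { "add": ["SYS_NICE"] }
  }
}
```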
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857046186 -0500 CDT m=+20.599057972", + "state": "skipped", + "testID": { + "id": "access-control-sys-nice-realtime-capability", + "suite": "access-control", + "tags": "telco" + } + }, "access-control-sys-ptrace-capability": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-sys_ptrace", - "description": "Check that if process namespace sharing is enabled for a Pod then the SYS_PTRACE capability is allowed. This capability is required when using Process Namespace Sharing. This is used when processes from one Container need to be exposed to another Container. For example, to send signals like SIGHUP from a process in a Container to another process in another Container. For more information on these capabilities refer to https://cloud.redhat.com/blog/linux-capabilities-in-openshift and https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Allow the SYS_PTRACE capability when enabling process namespace sharing for a Pod" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857071616 -0500 CDT m=+20.599083378", - "state": "skipped", - "testID": { - "id": "access-control-sys-ptrace-capability", - "suite": "access-control", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-sys_ptrace", + "description": "Check that if process namespace sharing is enabled for a Pod then the SYS_PTRACE capability is allowed. This capability is required when using Process Namespace Sharing. This is used when processes from one Container need to be exposed to another Container. For example, to send signals like SIGHUP from a process in a Container to another process in another Container. 
For more information on these capabilities refer to https://cloud.redhat.com/blog/linux-capabilities-in-openshift and https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Allow the SYS_PTRACE capability when enabling process namespace sharing for a Pod" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857071616 -0500 CDT m=+20.599083378", + "state": "skipped", + "testID": { + "id": "access-control-sys-ptrace-capability", + "suite": "access-control", + "tags": "telco" + } + }, "affiliated-certification-container-is-certified": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/certify-your-application/overview", - "description": "Tests whether container images listed in the configuration file have passed the Red Hat Container Certification Program (CCP).", - "exceptionProcess": "There is no documented exception process for this.Partner can run CNF Certification test suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in CNF Certification test suite must be re-run once the other certifications have been granted.", - "remediation": "Ensure that your container has passed the Red Hat Container Certification Program (CCP)." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856226961 -0500 CDT m=+20.598238723", - "state": "skipped", - "testID": { - "id": "affiliated-certification-container-is-certified", - "suite": "affiliated-certification", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/certify-your-application/overview", + "description": "Tests whether container images listed in the configuration file have passed the Red Hat Container Certification Program (CCP).", + "exceptionProcess": "There is no documented exception process for this.Partner can run CNF Certification test suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in CNF Certification test suite must be re-run once the other certifications have been granted.", + "remediation": "Ensure that your container has passed the Red Hat Container Certification Program (CCP)." 
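Tying the access-control-sys-ptrace-capability entry above together: shareProcessNamespace is set at the Pod level while the capability is granted per container. A minimal sketch with placeholder names:

```json
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": { "name": "shared-ns-pod" },
  "spec": {
    "shareProcessNamespace": true,
    "containers": [
      {
        "name": "app",
        "image": "registry.example.com/app:1.0",
        "securityContext": { "capabilities": { "add": ["SYS_PTRACE"] } }
      }
    ]
  }
}
```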
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856226961 -0500 CDT m=+20.598238723", + "state": "skipped", + "testID": { + "id": "affiliated-certification-container-is-certified", + "suite": "affiliated-certification", + "tags": "common" + } + }, "affiliated-certification-container-is-certified-digest": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/certify-your-application/overview", - "description": "Tests whether container images that are autodiscovered have passed the Red Hat Container Certification Program by their digest(CCP).", - "exceptionProcess": "There is no documented exception process for this.Partner can run CNF Certification test suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in CNF Certification test suite must be re-run once the other certifications have been granted.", - "remediation": "Ensure that your container has passed the Red Hat Container Certification Program (CCP)." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856287616 -0500 CDT m=+20.598299387", - "state": "skipped", - "testID": { - "id": "affiliated-certification-container-is-certified-digest", - "suite": "affiliated-certification", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/certify-your-application/overview", + "description": "Tests whether container images that are autodiscovered have passed the Red Hat Container Certification Program by their digest(CCP).", + "exceptionProcess": "There is no documented exception process for this.Partner can run CNF Certification test suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in CNF Certification test suite must be re-run once the other certifications have been granted.", + "remediation": "Ensure that your container has passed the Red Hat Container Certification Program (CCP)." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856287616 -0500 CDT m=+20.598299387", + "state": "skipped", + "testID": { + "id": "affiliated-certification-container-is-certified-digest", + "suite": "affiliated-certification", + "tags": "common" + } + }, "affiliated-certification-helm-version": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-helm", - "description": "Test to check if the helm chart is v3", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Check Helm Chart is v3 and not v2 which is not supported due to security risks associated with Tiller." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856193994 -0500 CDT m=+20.598205756", - "state": "skipped", - "testID": { - "id": "affiliated-certification-helm-version", - "suite": "affiliated-certification", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-helm", + "description": "Test to check if the helm chart is v3", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Check Helm Chart is v3 and not v2 which is not supported due to security risks associated with Tiller." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856193994 -0500 CDT m=+20.598205756", + "state": "skipped", + "testID": { + "id": "affiliated-certification-helm-version", + "suite": "affiliated-certification", + "tags": "common" + } + }, "affiliated-certification-helmchart-is-certified": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/certify-your-application/overview", - "description": "Tests whether helm charts listed in the cluster passed the Red Hat Helm Certification Program.", - "exceptionProcess": "There is no documented exception process for this.Partner can run CNF Certification test suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in CNF Certification test suite must be re-run once the other certifications have been granted.", - "remediation": "Ensure that the helm charts under test passed the Red Hat's helm Certification Program (e.g. listed in https://charts.openshift.io/index.yaml)." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856267896 -0500 CDT m=+20.598279669", - "state": "skipped", - "testID": { - "id": "affiliated-certification-helmchart-is-certified", - "suite": "affiliated-certification", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/certify-your-application/overview", + "description": "Tests whether helm charts listed in the cluster passed the Red Hat Helm Certification Program.", + "exceptionProcess": "There is no documented exception process for this.Partner can run CNF Certification test suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in CNF Certification test suite must be re-run once the other certifications have been granted.", + "remediation": "Ensure that the helm charts under test passed the Red Hat's helm Certification Program (e.g. listed in https://charts.openshift.io/index.yaml)." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856267896 -0500 CDT m=+20.598279669", + "state": "skipped", + "testID": { + "id": "affiliated-certification-helmchart-is-certified", + "suite": "affiliated-certification", + "tags": "common" + } + }, "affiliated-certification-operator-is-certified": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements", - "description": "Tests whether CNF Operators listed in the configuration file have passed the Red Hat Operator Certification Program (OCP).", - "exceptionProcess": "There is no documented exception process for this.Partner can run CNF Certification test suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in CNF Certification test suite must be re-run once the other certifications have been granted.", - "remediation": "Ensure that your Operator has passed Red Hat's Operator Certification Program (OCP)." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856250106 -0500 CDT m=+20.598261891", - "state": "skipped", - "testID": { - "id": "affiliated-certification-operator-is-certified", - "suite": "affiliated-certification", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements", + "description": "Tests whether CNF Operators listed in the configuration file have passed the Red Hat Operator Certification Program (OCP).", + "exceptionProcess": "There is no documented exception process for this.Partner can run CNF Certification test suite before passing other certifications (Container/Operator/HelmChart) but the affiliated certification test cases in CNF Certification test suite must be re-run once the other certifications have been granted.", + "remediation": "Ensure that your Operator has passed Red Hat's Operator Certification Program (OCP)." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856250106 -0500 CDT m=+20.598261891", + "state": "skipped", + "testID": { + "id": "affiliated-certification-operator-is-certified", + "suite": "affiliated-certification", + "tags": "common" + } + }, "lifecycle-affinity-required-pods": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", - "description": "Checks that affinity rules are in place if AffinityRequired: 'true' labels are set on Pods.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Pods which need to be co-located on the same node need Affinity rules. If a pod/statefulset/deployment is required to use affinity rules, please add AffinityRequired: 'true' as a label." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.286023847 -0500 CDT m=+20.028035643", - "state": "skipped", - "testID": { - "id": "lifecycle-affinity-required-pods", - "suite": "lifecycle", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", + "description": "Checks that affinity rules are in place if AffinityRequired: 'true' labels are set on Pods.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Pods which need to be co-located on the same node need Affinity rules. If a pod/statefulset/deployment is required to use affinity rules, please add AffinityRequired: 'true' as a label." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.286023847 -0500 CDT m=+20.028035643", + "state": "skipped", + "testID": { + "id": "lifecycle-affinity-required-pods", + "suite": "lifecycle", + "tags": "telco" + } + }, "lifecycle-container-shutdown": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cloud-native-design-best-practices", - "description": "Ensure that the containers lifecycle preStop management feature is configured. The most basic requirement for the lifecycle management of Pods in OpenShift are the ability to start and stop correctly. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. When pods are shut down by the platform they are sent a SIGTERM signal which means that the process in the container should start shutting down, closing connections and stopping all activity. If the pod doesn’t shut down within the default 30 seconds then the platform may send a SIGKILL signal which will stop the pod immediately. This method isn’t as clean and the default time between the SIGTERM and SIGKILL messages can be modified based on the requirements of the application. Containers should respond to SIGTERM/SIGKILL with graceful shutdown.", - "exceptionProcess": "Identify which pod is not conforming to the process and submit information as to why it cannot use a preStop shutdown specification.", - "remediation": "The preStop can be used to gracefully stop the container and clean resources (e.g., DB connection). For details, see https://www.containiq.com/post/kubernetes-container-lifecycle-events-and-hooks and https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. All pods must respond to SIGTERM signal and shutdown gracefully with a zero exit code." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.285477659 -0500 CDT m=+20.027489443", - "state": "skipped", - "testID": { - "id": "lifecycle-container-shutdown", - "suite": "lifecycle", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cloud-native-design-best-practices", + "description": "Ensure that the containers lifecycle preStop management feature is configured. The most basic requirement for the lifecycle management of Pods in OpenShift are the ability to start and stop correctly. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. When pods are shut down by the platform they are sent a SIGTERM signal which means that the process in the container should start shutting down, closing connections and stopping all activity. 
If the pod doesn’t shut down within the default 30 seconds then the platform may send a SIGKILL signal which will stop the pod immediately. This method isn’t as clean and the default time between the SIGTERM and SIGKILL messages can be modified based on the requirements of the application. Containers should respond to SIGTERM/SIGKILL with graceful shutdown.", + "exceptionProcess": "Identify which pod is not conforming to the process and submit information as to why it cannot use a preStop shutdown specification.", + "remediation": "The preStop can be used to gracefully stop the container and clean resources (e.g., DB connection). For details, see https://www.containiq.com/post/kubernetes-container-lifecycle-events-and-hooks and https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. All pods must respond to SIGTERM signal and shutdown gracefully with a zero exit code." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.285477659 -0500 CDT m=+20.027489443", + "state": "skipped", + "testID": { + "id": "lifecycle-container-shutdown", + "suite": "lifecycle", + "tags": "telco" + } + }, "lifecycle-container-startup": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cloud-native-design-best-practices", - "description": "Ensure that the containers lifecycle postStart management feature is configured. A container must receive important events from the platform and conform/react to these events properly. For example, a container should catch SIGTERM or SIGKILL from the platform and shutdown as quickly as possible. Other typically important events from the platform are PostStart to initialize before servicing requests and PreStop to release resources cleanly before shutting down.", - "exceptionProcess": "Identify which pod is not conforming to the process and submit information as to why it cannot use a postStart startup specification.", - "remediation": "PostStart is normally used to configure the container, set up dependencies, and record the new creation. You could use this event to check that a required API is available before the container’s main work begins. Kubernetes will not change the container’s state to Running until the PostStart script has executed successfully. For details, see https://www.containiq.com/post/kubernetes-container-lifecycle-events-and-hooks and https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. PostStart is used to configure container, set up dependencies, record new creation. It can also be used to check that a required API is available before the container’s work begins." 
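Concretely, the hooks discussed in the lifecycle-container-shutdown and lifecycle-container-startup entries here are declared per container; a container-spec fragment, where both script paths are hypothetical stand-ins for the application's own logic:

```json
{
  "lifecycle": {
    "postStart": { "exec": { "command": ["/bin/sh", "-c", "/opt/app/check-deps.sh"] } },
    "preStop": { "exec": { "command": ["/bin/sh", "-c", "/opt/app/drain.sh"] } }
  }
}
```

Note that the main process must still handle SIGTERM itself: preStop runs before the signal is delivered, but it does not replace graceful shutdown.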
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.285566546 -0500 CDT m=+20.027578321", - "state": "skipped", - "testID": { - "id": "lifecycle-container-startup", - "suite": "lifecycle", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cloud-native-design-best-practices", + "description": "Ensure that the containers lifecycle postStart management feature is configured. A container must receive important events from the platform and conform/react to these events properly. For example, a container should catch SIGTERM or SIGKILL from the platform and shutdown as quickly as possible. Other typically important events from the platform are PostStart to initialize before servicing requests and PreStop to release resources cleanly before shutting down.", + "exceptionProcess": "Identify which pod is not conforming to the process and submit information as to why it cannot use a postStart startup specification.", + "remediation": "PostStart is normally used to configure the container, set up dependencies, and record the new creation. You could use this event to check that a required API is available before the container’s main work begins. Kubernetes will not change the container’s state to Running until the PostStart script has executed successfully. For details, see https://www.containiq.com/post/kubernetes-container-lifecycle-events-and-hooks and https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks. PostStart is used to configure container, set up dependencies, record new creation. It can also be used to check that a required API is available before the container’s work begins." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.285566546 -0500 CDT m=+20.027578321", + "state": "skipped", + "testID": { + "id": "lifecycle-container-startup", + "suite": "lifecycle", + "tags": "telco" + } + }, "lifecycle-cpu-isolation": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cpu-isolation", - "description": "CPU isolation requires: For each container within the pod, resource requests and limits must be identical. Request and Limits are in the form of whole CPUs. The runTimeClassName must be specified. Annotations required disabling CPU and IRQ load-balancing.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "CPU isolation testing is enabled. Please ensure that all pods adhere to the CPU isolation requirements." 
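Putting the lifecycle-cpu-isolation requirements together, a candidate Pod would combine identical whole-CPU requests and limits, a runtimeClassName, and load-balancing annotations. A sketch, assuming a CRI-O based cluster where the annotation names below and the "performance-rt" runtime class are environment-specific and must match your performance profile:

```json
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name": "isolated-cpu-pod",
    "annotations": {
      "cpu-load-balancing.crio.io": "disable",
      "irq-load-balancing.crio.io": "disable"
    }
  },
  "spec": {
    "runtimeClassName": "performance-rt",
    "containers": [
      {
        "name": "app",
        "image": "registry.example.com/app:1.0",
        "resources": {
          "requests": { "cpu": "2", "memory": "1Gi" },
          "limits": { "cpu": "2", "memory": "1Gi" }
        }
      }
    ]
  }
}
```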
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.28599402 -0500 CDT m=+20.028005803", - "state": "skipped", - "testID": { - "id": "lifecycle-cpu-isolation", - "suite": "lifecycle", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cpu-isolation", + "description": "CPU isolation requires: For each container within the pod, resource requests and limits must be identical. Request and Limits are in the form of whole CPUs. The runTimeClassName must be specified. Annotations required disabling CPU and IRQ load-balancing.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "CPU isolation testing is enabled. Please ensure that all pods adhere to the CPU isolation requirements." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.28599402 -0500 CDT m=+20.028005803", + "state": "skipped", + "testID": { + "id": "lifecycle-cpu-isolation", + "suite": "lifecycle", + "tags": "telco" + } + }, "lifecycle-crd-scaling": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", - "description": "Tests that CNF crd support scale in/out operations. First, the test starts getting the current replicaCount (N) of the crd/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the crd/s. In case of crd that are managed by HPA the test is changing the min and max value to crd Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the crd/s", - "exceptionProcess": "There is no documented exception process for this. Not applicable to SNO applications.", - "remediation": "Ensure CNF crd/replica sets can scale in/out successfully." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Optional", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.28554559 -0500 CDT m=+20.027557369", - "state": "skipped", - "testID": { - "id": "lifecycle-crd-scaling", - "suite": "lifecycle", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", + "description": "Tests that CNF crd support scale in/out operations. First, the test starts getting the current replicaCount (N) of the crd/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the crd/s. 
In case of crd that are managed by HPA the test is changing the min and max value to crd Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the crd/s", + "exceptionProcess": "There is no documented exception process for this. Not applicable to SNO applications.", + "remediation": "Ensure CNF crd/replica sets can scale in/out successfully." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Optional", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.28554559 -0500 CDT m=+20.027557369", + "state": "skipped", + "testID": { + "id": "lifecycle-crd-scaling", + "suite": "lifecycle", + "tags": "common" + } + }, "lifecycle-deployment-scaling": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", - "description": "Tests that CNF deployments support scale in/out operations. First, the test starts getting the current replicaCount (N) of the deployment/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the deployment/s. In case of deployments that are managed by HPA the test is changing the min and max value to deployment Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the deployment/s", - "exceptionProcess": "There is no documented exception process for this. Not applicable to SNO applications.", - "remediation": "Ensure CNF deployments/replica sets can scale in/out successfully." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Optional", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.285906406 -0500 CDT m=+20.027918193", - "state": "skipped", - "testID": { - "id": "lifecycle-deployment-scaling", - "suite": "lifecycle", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", + "description": "Tests that CNF deployments support scale in/out operations. First, the test starts getting the current replicaCount (N) of the deployment/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the deployment/s. In case of deployments that are managed by HPA the test is changing the min and max value to deployment Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the deployment/s", + "exceptionProcess": "There is no documented exception process for this. Not applicable to SNO applications.", + "remediation": "Ensure CNF deployments/replica sets can scale in/out successfully." 
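A Deployment that supports the scale-in/scale-out sequence described in lifecycle-deployment-scaling only needs a sane replicas value and no external controller fighting the change; a minimal sketch with placeholder names:

```json
{
  "apiVersion": "apps/v1",
  "kind": "Deployment",
  "metadata": { "name": "example-deployment" },
  "spec": {
    "replicas": 2,
    "selector": { "matchLabels": { "app": "example" } },
    "template": {
      "metadata": { "labels": { "app": "example" } },
      "spec": {
        "containers": [ { "name": "app", "image": "registry.example.com/app:1.0" } ]
      }
    }
  }
}
```

Running `oc scale deployment/example-deployment --replicas=1` and then `--replicas=2` mirrors the scale-in/scale-out sequence the test performs.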
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Optional", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.285906406 -0500 CDT m=+20.027918193", + "state": "skipped", + "testID": { + "id": "lifecycle-deployment-scaling", + "suite": "lifecycle", + "tags": "common" + } + }, "lifecycle-image-pull-policy": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-use-imagepullpolicy-if-not-present", - "description": "Ensure that the containers under test are using IfNotPresent as Image Pull Policy. If there is a situation where the container dies and needs to be restarted, the image pull policy becomes important. PullIfNotPresent is recommended so that a loss of image registry access does not prevent the pod from restarting.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Ensure that the containers under test are using IfNotPresent as Image Pull Policy." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.285629812 -0500 CDT m=+20.027641607", - "state": "skipped", - "testID": { - "id": "lifecycle-image-pull-policy", - "suite": "lifecycle", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-use-imagepullpolicy-if-not-present", + "description": "Ensure that the containers under test are using IfNotPresent as Image Pull Policy. If there is a situation where the container dies and needs to be restarted, the image pull policy becomes important. PullIfNotPresent is recommended so that a loss of image registry access does not prevent the pod from restarting.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Ensure that the containers under test are using IfNotPresent as Image Pull Policy." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.285629812 -0500 CDT m=+20.027641607", + "state": "skipped", + "testID": { + "id": "lifecycle-image-pull-policy", + "suite": "lifecycle", + "tags": "telco" + } + }, "lifecycle-liveness-probe": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", - "description": "Check that all containers under test have liveness probe defined. The most basic requirement for the lifecycle management of Pods in OpenShift are the ability to start and stop correctly. 
When starting up, health probes like liveness and readiness checks can be put into place to ensure the application is functioning properly.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Add a liveness probe to deployed containers. CNFs shall self-recover from common failures like pod failure, host failure, and network failure. Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.285690183 -0500 CDT m=+20.027701972", - "state": "skipped", - "testID": { - "id": "lifecycle-liveness-probe", - "suite": "lifecycle", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", + "description": "Check that all containers under test have liveness probe defined. The most basic requirement for the lifecycle management of Pods in OpenShift are the ability to start and stop correctly. When starting up, health probes like liveness and readiness checks can be put into place to ensure the application is functioning properly.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Add a liveness probe to deployed containers. CNFs shall self-recover from common failures like pod failure, host failure, and network failure. Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.285690183 -0500 CDT m=+20.027701972", + "state": "skipped", + "testID": { + "id": "lifecycle-liveness-probe", + "suite": "lifecycle", + "tags": "telco" + } + }, "lifecycle-persistent-volume-reclaim-policy": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-csi", - "description": "Check that the persistent volumes the CNF pods are using have a reclaim policy of delete. 
Network Functions should clear persistent storage by deleting their PVs when removing their application from a cluster.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Ensure that all persistent volumes are using the reclaim policy: delete" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.285964275 -0500 CDT m=+20.027976061", - "state": "skipped", - "testID": { - "id": "lifecycle-persistent-volume-reclaim-policy", - "suite": "lifecycle", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-csi", + "description": "Check that the persistent volumes the CNF pods are using have a reclaim policy of delete. Network Functions should clear persistent storage by deleting their PVs when removing their application from a cluster.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Ensure that all persistent volumes are using the reclaim policy: delete" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.285964275 -0500 CDT m=+20.027976061", + "state": "skipped", + "testID": { + "id": "lifecycle-persistent-volume-reclaim-policy", + "suite": "lifecycle", + "tags": "telco" + } + }, "lifecycle-pod-high-availability": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", - "description": "Ensures that CNF Pods specify podAntiAffinity rules and replica value is set to more than 1.", - "exceptionProcess": "There is no documented exception process for this. Not applicable to SNO applications.", - "remediation": "In high availability cases, Pod podAntiAffinity rule should be specified for pod scheduling and pod replica value is set to more than 1 ." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Optional", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.285789316 -0500 CDT m=+20.027801103", - "state": "skipped", - "testID": { - "id": "lifecycle-pod-high-availability", - "suite": "lifecycle", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", + "description": "Ensures that CNF Pods specify podAntiAffinity rules and replica value is set to more than 1.", + "exceptionProcess": "There is no documented exception process for this. Not applicable to SNO applications.", + "remediation": "In high availability cases, Pod podAntiAffinity rule should be specified for pod scheduling and pod replica value is set to more than 1 ." 
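For lifecycle-pod-high-availability, the podAntiAffinity rule goes in the Pod template of a workload whose replicas value is greater than 1; the relevant fragment, spreading replicas across nodes by hostname:

```json
{
  "affinity": {
    "podAntiAffinity": {
      "requiredDuringSchedulingIgnoredDuringExecution": [
        {
          "labelSelector": { "matchLabels": { "app": "example" } },
          "topologyKey": "kubernetes.io/hostname"
        }
      ]
    }
  }
}
```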
}, - "lifecycle-pod-owner-type": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-no-naked-pods", - "description": "Tests that CNF Pod(s) are deployed as part of a ReplicaSet(s)/StatefulSet(s).", - "exceptionProcess": "There is no documented exception process for this. Pods should not be deployed as DaemonSet or naked pods.", - "remediation": "Deploy the CNF using ReplicaSet/StatefulSet." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.285756811 -0500 CDT m=+20.027768606", - "state": "skipped", - "testID": { - "id": "lifecycle-pod-owner-type", - "suite": "lifecycle", - "tags": "telco" - } + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Optional", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.285789316 -0500 CDT m=+20.027801103", + "state": "skipped", + "testID": { + "id": "lifecycle-pod-high-availability", + "suite": "lifecycle", + "tags": "common" + } + }, + "lifecycle-pod-owner-type": { + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-no-naked-pods", + "description": "Tests that CNF Pod(s) are deployed as part of a ReplicaSet(s)/StatefulSet(s).", + "exceptionProcess": "There is no documented exception process for this. Pods should not be deployed as DaemonSet or naked pods.", + "remediation": "Deploy the CNF using ReplicaSet/StatefulSet." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" + }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.285756811 -0500 CDT m=+20.027768606", + "state": "skipped", + "testID": { + "id": "lifecycle-pod-owner-type", + "suite": "lifecycle", + "tags": "telco" + } + }, "lifecycle-pod-recreation": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-upgrade-expectations", - "description": "Tests that a CNF is configured to support High Availability. First, this test cordons and drains a Node that hosts the CNF Pod. Next, the test ensures that OpenShift can re-instantiate the Pod on another Node, and that the actual replica count matches the desired replica count.", - "exceptionProcess": "No exceptions - workloads should be able to be restarted/recreated.", - "remediation": "Ensure that CNF Pod(s) utilize a configuration that supports High Availability. Additionally, ensure that there are available Nodes in the OpenShift cluster that can be utilized in the event that a host Node fails." 
-            },
-            "categoryClassification": {
-                "Extended": "Mandatory",
-                "FarEdge": "Mandatory",
-                "NonTelco": "Mandatory",
-                "Telco": "Mandatory"
-            },
-            "duration": 0,
-            "endTime": "0001-01-01 00:00:00 +0000 UTC",
-            "failureLineContent": "",
-            "failureLocation": ":0",
-            "skipReason": "",
-            "startTime": "2023-09-04 09:18:08.285875149 -0500 CDT m=+20.027886942",
-            "state": "skipped",
-            "testID": {
-                "id": "lifecycle-pod-recreation",
-                "suite": "lifecycle",
-                "tags": "common"
-            }
+        "capturedTestOutput": "",
+        "catalogInfo": {
+          "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-upgrade-expectations",
+          "description": "Tests that a CNF is configured to support High Availability. First, this test cordons and drains a Node that hosts the CNF Pod. Next, the test ensures that OpenShift can re-instantiate the Pod on another Node, and that the actual replica count matches the desired replica count.",
+          "exceptionProcess": "No exceptions - workloads should be able to be restarted/recreated.",
+          "remediation": "Ensure that CNF Pod(s) utilize a configuration that supports High Availability. Additionally, ensure that there are available Nodes in the OpenShift cluster that can be utilized in the event that a host Node fails."
+        },
+        "categoryClassification": {
+          "Extended": "Mandatory",
+          "FarEdge": "Mandatory",
+          "NonTelco": "Mandatory",
+          "Telco": "Mandatory"
+        },
+        "duration": 0,
+        "endTime": "0001-01-01 00:00:00 +0000 UTC",
+        "failureLineContent": "",
+        "failureLocation": ":0",
+        "skipReason": "",
+        "startTime": "2023-09-04 09:18:08.285875149 -0500 CDT m=+20.027886942",
+        "state": "skipped",
+        "testID": {
+          "id": "lifecycle-pod-recreation",
+          "suite": "lifecycle",
+          "tags": "common"
+        }
+      },
      "lifecycle-pod-scheduling": {
-            "capturedTestOutput": "",
-            "catalogInfo": {
-                "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
-                "description": "Ensures that CNF Pods do not specify nodeSelector or nodeAffinity. In most cases, Pods should allow for instantiation on any underlying Node. CNFs shall not use node selectors nor taints/tolerations to assign pod location.",
-                "exceptionProcess": "Exception will only be considered if application requires specialized hardware. Must specify which container requires special hardware and why.",
-                "remediation": "In most cases, Pod's should not specify their host Nodes through nodeSelector or nodeAffinity. However, there are cases in which CNFs require specialized hardware specific to a particular class of Node."
-            },
-            "categoryClassification": {
-                "Extended": "Mandatory",
-                "FarEdge": "Mandatory",
-                "NonTelco": "Optional",
-                "Telco": "Mandatory"
-            },
-            "duration": 0,
-            "endTime": "0001-01-01 00:00:00 +0000 UTC",
-            "failureLineContent": "",
-            "failureLocation": ":0",
-            "skipReason": "",
-            "startTime": "2023-09-04 09:18:08.2858423 -0500 CDT m=+20.027854085",
-            "state": "skipped",
-            "testID": {
-                "id": "lifecycle-pod-scheduling",
-                "suite": "lifecycle",
-                "tags": "telco"
-            }
+        "capturedTestOutput": "",
+        "catalogInfo": {
+          "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
+          "description": "Ensures that CNF Pods do not specify nodeSelector or nodeAffinity. In most cases, Pods should allow for instantiation on any underlying Node. CNFs shall not use node selectors nor taints/tolerations to assign pod location.",
+          "exceptionProcess": "Exception will only be considered if application requires specialized hardware. Must specify which container requires special hardware and why.",
+          "remediation": "In most cases, Pods should not specify their host Nodes through nodeSelector or nodeAffinity. However, there are cases in which CNFs require specialized hardware specific to a particular class of Node."
+        },
+        "categoryClassification": {
+          "Extended": "Mandatory",
+          "FarEdge": "Mandatory",
+          "NonTelco": "Optional",
+          "Telco": "Mandatory"
+        },
+        "duration": 0,
+        "endTime": "0001-01-01 00:00:00 +0000 UTC",
+        "failureLineContent": "",
+        "failureLocation": ":0",
+        "skipReason": "",
+        "startTime": "2023-09-04 09:18:08.2858423 -0500 CDT m=+20.027854085",
+        "state": "skipped",
+        "testID": {
+          "id": "lifecycle-pod-scheduling",
+          "suite": "lifecycle",
+          "tags": "telco"
+        }
+      },
      "lifecycle-pod-toleration-bypass": {
-            "capturedTestOutput": "",
-            "catalogInfo": {
-                "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-taints-and-tolerations",
-                "description": "Check that pods do not have NoExecute, PreferNoSchedule, or NoSchedule tolerations that have been modified from the default.",
-                "exceptionProcess": "There is no documented exception process for this.",
-                "remediation": "Do not allow pods to bypass the NoExecute, PreferNoSchedule, or NoSchedule tolerations that are default applied by Kubernetes."
-            },
-            "categoryClassification": {
-                "Extended": "Mandatory",
-                "FarEdge": "Mandatory",
-                "NonTelco": "Optional",
-                "Telco": "Mandatory"
-            },
-            "duration": 0,
-            "endTime": "0001-01-01 00:00:00 +0000 UTC",
-            "failureLineContent": "",
-            "failureLocation": ":0",
-            "skipReason": "",
-            "startTime": "2023-09-04 09:18:08.286054539 -0500 CDT m=+20.028066331",
-            "state": "skipped",
-            "testID": {
-                "id": "lifecycle-pod-toleration-bypass",
-                "suite": "lifecycle",
-                "tags": "telco"
-            }
+        "capturedTestOutput": "",
+        "catalogInfo": {
+          "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-taints-and-tolerations",
+          "description": "Check that pods do not have NoExecute, PreferNoSchedule, or NoSchedule tolerations that have been modified from the default.",
+          "exceptionProcess": "There is no documented exception process for this.",
+          "remediation": "Do not allow pods to bypass the NoExecute, PreferNoSchedule, or NoSchedule tolerations that are default applied by Kubernetes."
+        },
+        "categoryClassification": {
+          "Extended": "Mandatory",
+          "FarEdge": "Mandatory",
+          "NonTelco": "Optional",
+          "Telco": "Mandatory"
+        },
+        "duration": 0,
+        "endTime": "0001-01-01 00:00:00 +0000 UTC",
+        "failureLineContent": "",
+        "failureLocation": ":0",
+        "skipReason": "",
+        "startTime": "2023-09-04 09:18:08.286054539 -0500 CDT m=+20.028066331",
+        "state": "skipped",
+        "testID": {
+          "id": "lifecycle-pod-toleration-bypass",
+          "suite": "lifecycle",
+          "tags": "telco"
+        }
+      },
      "lifecycle-readiness-probe": {
-            "capturedTestOutput": "",
-            "catalogInfo": {
-                "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
-                "description": "Check that all containers under test have readiness probe defined. There are different ways a pod can stop on on OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem.",
-                "exceptionProcess": "There is no documented exception process for this.",
-                "remediation": "Add a readiness probe to deployed containers"
-            },
-            "categoryClassification": {
-                "Extended": "Mandatory",
-                "FarEdge": "Mandatory",
-                "NonTelco": "Optional",
-                "Telco": "Mandatory"
-            },
-            "duration": 0,
-            "endTime": "0001-01-01 00:00:00 +0000 UTC",
-            "failureLineContent": "",
-            "failureLocation": ":0",
-            "skipReason": "",
-            "startTime": "2023-09-04 09:18:08.285663558 -0500 CDT m=+20.027675334",
-            "state": "skipped",
-            "testID": {
-                "id": "lifecycle-readiness-probe",
-                "suite": "lifecycle",
-                "tags": "telco"
-            }
+        "capturedTestOutput": "",
+        "catalogInfo": {
+          "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
+          "description": "Check that all containers under test have readiness probe defined. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem.",
+          "exceptionProcess": "There is no documented exception process for this.",
+          "remediation": "Add a readiness probe to deployed containers"
+        },
+        "categoryClassification": {
+          "Extended": "Mandatory",
+          "FarEdge": "Mandatory",
+          "NonTelco": "Optional",
+          "Telco": "Mandatory"
+        },
+        "duration": 0,
+        "endTime": "0001-01-01 00:00:00 +0000 UTC",
+        "failureLineContent": "",
+        "failureLocation": ":0",
+        "skipReason": "",
+        "startTime": "2023-09-04 09:18:08.285663558 -0500 CDT m=+20.027675334",
+        "state": "skipped",
+        "testID": {
+          "id": "lifecycle-readiness-probe",
+          "suite": "lifecycle",
+          "tags": "telco"
+        }
+      },
      "lifecycle-startup-probe": {
-            "capturedTestOutput": "",
-            "catalogInfo": {
-                "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-exit-status",
-                "description": "Check that all containers under test have startup probe defined. CNFs shall self-recover from common failures like pod failure, host failure, and network failure. Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum.",
-                "exceptionProcess": "There is no documented exception process for this.",
-                "remediation": "Add a startup probe to deployed containers"
-            },
-            "categoryClassification": {
-                "Extended": "Mandatory",
-                "FarEdge": "Mandatory",
-                "NonTelco": "Optional",
-                "Telco": "Mandatory"
-            },
-            "duration": 0,
-            "endTime": "0001-01-01 00:00:00 +0000 UTC",
-            "failureLineContent": "",
-            "failureLocation": ":0",
-            "skipReason": "",
-            "startTime": "2023-09-04 09:18:08.285721889 -0500 CDT m=+20.027733675",
-            "state": "skipped",
-            "testID": {
-                "id": "lifecycle-startup-probe",
-                "suite": "lifecycle",
-                "tags": "telco"
-            }
+        "capturedTestOutput": "",
+        "catalogInfo": {
+          "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-exit-status",
+          "description": "Check that all containers under test have startup probe defined. CNFs shall self-recover from common failures like pod failure, host failure, and network failure. Kubernetes native mechanisms such as health-checks (Liveness, Readiness and Startup Probes) shall be employed at a minimum.",
+          "exceptionProcess": "There is no documented exception process for this.",
+          "remediation": "Add a startup probe to deployed containers"
+        },
+        "categoryClassification": {
+          "Extended": "Mandatory",
+          "FarEdge": "Mandatory",
+          "NonTelco": "Optional",
+          "Telco": "Mandatory"
+        },
+        "duration": 0,
+        "endTime": "0001-01-01 00:00:00 +0000 UTC",
+        "failureLineContent": "",
+        "failureLocation": ":0",
+        "skipReason": "",
+        "startTime": "2023-09-04 09:18:08.285721889 -0500 CDT m=+20.027733675",
+        "state": "skipped",
+        "testID": {
+          "id": "lifecycle-startup-probe",
+          "suite": "lifecycle",
+          "tags": "telco"
+        }
+      },
      "lifecycle-statefulset-scaling": {
-            "capturedTestOutput": "",
-            "catalogInfo": {
-                "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
-                "description": "Tests that CNF statefulsets support scale in/out operations. First, the test starts getting the current replicaCount (N) of the statefulset/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the statefulset/s. In case of statefulsets that are managed by HPA the test is changing the min and max value to statefulset Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly its restoring the original min/max replica of the statefulset/s",
-                "exceptionProcess": "There is no documented exception process for this. Not applicable to SNO applications.",
-                "remediation": "Ensure CNF statefulsets/replica sets can scale in/out successfully."
-            },
-            "categoryClassification": {
-                "Extended": "Mandatory",
-                "FarEdge": "Optional",
-                "NonTelco": "Mandatory",
-                "Telco": "Mandatory"
-            },
-            "duration": 0,
-            "endTime": "0001-01-01 00:00:00 +0000 UTC",
-            "failureLineContent": "",
-            "failureLocation": ":0",
-            "skipReason": "",
-            "startTime": "2023-09-04 09:18:08.285933712 -0500 CDT m=+20.027945508",
-            "state": "skipped",
-            "testID": {
-                "id": "lifecycle-statefulset-scaling",
-                "suite": "lifecycle",
-                "tags": "common"
-            }
+        "capturedTestOutput": "",
+        "catalogInfo": {
+          "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations",
+          "description": "Tests that CNF statefulsets support scale in/out operations. First, the test starts getting the current replicaCount (N) of the statefulset/s with the Pod Under Test. Then, it executes the scale-in oc command for (N-1) replicas. Lastly, it executes the scale-out oc command, restoring the original replicaCount of the statefulset/s. In case of statefulsets that are managed by HPA the test is changing the min and max value to statefulset Replica - 1 during scale-in and the original replicaCount again for both min/max during the scale-out stage. Lastly, it restores the original min/max replicas of the statefulset/s",
+          "exceptionProcess": "There is no documented exception process for this. Not applicable to SNO applications.",
+          "remediation": "Ensure CNF statefulsets/replica sets can scale in/out successfully."
+        },
+        "categoryClassification": {
+          "Extended": "Mandatory",
+          "FarEdge": "Optional",
+          "NonTelco": "Mandatory",
+          "Telco": "Mandatory"
+        },
+        "duration": 0,
+        "endTime": "0001-01-01 00:00:00 +0000 UTC",
+        "failureLineContent": "",
+        "failureLocation": ":0",
+        "skipReason": "",
+        "startTime": "2023-09-04 09:18:08.285933712 -0500 CDT m=+20.027945508",
+        "state": "skipped",
+        "testID": {
+          "id": "lifecycle-statefulset-scaling",
+          "suite": "lifecycle",
+          "tags": "common"
+        }
+      },
      "lifecycle-storage-required-pods": {
-            "capturedTestOutput": "",
-            "catalogInfo": {
-                "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-local-storage",
-                "description": "Checks that pods do not place persistent volumes on local storage.",
-                "exceptionProcess": "No exceptions",
-                "remediation": "If pod is StatefulSet, make sure servicename is not local-storage (persistent volumes should not be on local storage)."
-            },
-            "categoryClassification": {
-                "Extended": "Mandatory",
-                "FarEdge": "Mandatory",
-                "NonTelco": "Mandatory",
-                "Telco": "Mandatory"
-            },
-            "duration": 0,
-            "endTime": "0001-01-01 00:00:00 +0000 UTC",
-            "failureLineContent": "",
-            "failureLocation": ":0",
-            "skipReason": "",
-            "startTime": "2023-09-04 09:18:08.286109176 -0500 CDT m=+20.028120972",
-            "state": "skipped",
-            "testID": {
-                "id": "lifecycle-storage-required-pods",
-                "suite": "lifecycle",
-                "tags": "common"
-            }
+        "capturedTestOutput": "",
+        "catalogInfo": {
+          "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-local-storage",
+          "description": "Checks that pods do not place persistent volumes on local storage.",
+          "exceptionProcess": "No exceptions",
+          "remediation": "If pod is StatefulSet, make sure servicename is not local-storage (persistent volumes should not be on local storage)."
+        },
+        "categoryClassification": {
+          "Extended": "Mandatory",
+          "FarEdge": "Mandatory",
+          "NonTelco": "Mandatory",
+          "Telco": "Mandatory"
+        },
+        "duration": 0,
+        "endTime": "0001-01-01 00:00:00 +0000 UTC",
+        "failureLineContent": "",
+        "failureLocation": ":0",
+        "skipReason": "",
+        "startTime": "2023-09-04 09:18:08.286109176 -0500 CDT m=+20.028120972",
+        "state": "skipped",
+        "testID": {
+          "id": "lifecycle-storage-required-pods",
+          "suite": "lifecycle",
+          "tags": "common"
+        }
+      },
      "manageability-container-port-name-format": {
-            "capturedTestOutput": "",
-            "catalogInfo": {
-                "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs",
-                "description": "Check that the container's ports name follow the naming conventions. Name field in ContainerPort section must be of form `\u003cprotocol\u003e[-\u003csuffix\u003e]`. More naming convention requirements may be released in future",
-                "exceptionProcess": "No exception needed for optional/extended tests.",
-                "remediation": "Ensure that the container's ports name follow our partner naming conventions"
-            },
-            "categoryClassification": {
-                "Extended": "Mandatory",
-                "FarEdge": "Optional",
-                "NonTelco": "Optional",
-                "Telco": "Optional"
-            },
-            "duration": 0,
-            "endTime": "0001-01-01 00:00:00 +0000 UTC",
-            "failureLineContent": "",
-            "failureLocation": ":0",
-            "skipReason": "",
-            "startTime": "2023-09-04 09:18:08.856667713 -0500 CDT m=+20.598679484",
-            "state": "skipped",
-            "testID": {
-                "id": "manageability-container-port-name-format",
-                "suite": "manageability",
-                "tags": "extended"
-            }
+        "capturedTestOutput": "",
+        "catalogInfo": {
+          "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs",
+          "description": "Check that the container's port names follow the naming conventions. Name field in ContainerPort section must be of form `\u003cprotocol\u003e[-\u003csuffix\u003e]`. More naming convention requirements may be released in the future",
+          "exceptionProcess": "No exception needed for optional/extended tests.",
+          "remediation": "Ensure that the container's port names follow our partner naming conventions"
+        },
+        "categoryClassification": {
+          "Extended": "Mandatory",
+          "FarEdge": "Optional",
+          "NonTelco": "Optional",
+          "Telco": "Optional"
+        },
+        "duration": 0,
+        "endTime": "0001-01-01 00:00:00 +0000 UTC",
+        "failureLineContent": "",
+        "failureLocation": ":0",
+        "skipReason": "",
+        "startTime": "2023-09-04 09:18:08.856667713 -0500 CDT m=+20.598679484",
+        "state": "skipped",
+        "testID": {
+          "id": "manageability-container-port-name-format",
+          "suite": "manageability",
+          "tags": "extended"
+        }
+      },
      "manageability-containers-image-tag": {
-            "capturedTestOutput": "",
-            "catalogInfo": {
-                "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-image-tagging",
-                "description": "Check that image tag exists on containers.",
-                "exceptionProcess": "No exception needed for optional/extended tests.",
-                "remediation": "Ensure that all the container images are tagged. Checks containers have image tags (e.g. latest, stable, dev)."
- }, - "categoryClassification": { - "Extended": "Optional", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856649417 -0500 CDT m=+20.598661177", - "state": "skipped", - "testID": { - "id": "manageability-containers-image-tag", - "suite": "manageability", - "tags": "extended" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-image-tagging", + "description": "Check that image tag exists on containers.", + "exceptionProcess": "No exception needed for optional/extended tests.", + "remediation": "Ensure that all the container images are tagged. Checks containers have image tags (e.g. latest, stable, dev)." }, - "networking-dpdk-cpu-pinning-exec-probe": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cpu-manager-pinning", - "description": "If a CNF is doing CPU pinning, exec probes may not be used.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "If the CNF is doing CPU pinning and running a DPDK process do not use exec probes (executing a command within the container) as it may pile up and block the node eventually." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856492551 -0500 CDT m=+20.598504312", - "state": "skipped", - "testID": { - "id": "networking-dpdk-cpu-pinning-exec-probe", - "suite": "networking", - "tags": "telco" - } + "categoryClassification": { + "Extended": "Optional", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" + }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856649417 -0500 CDT m=+20.598661177", + "state": "skipped", + "testID": { + "id": "manageability-containers-image-tag", + "suite": "manageability", + "tags": "extended" + } + }, + "performance-cpu-pinning-no-exec-probes": { + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cpu-manager-pinning", + "description": "Workloads utilizing CPU pinning (Guaranteed QoS with exclusive CPUs) should not use exec probes. Exec probes run a command within the container, which could interfere with latency-sensitive workloads and cause performance degradation.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Workloads that use CPU pinning (Guaranteed QoS with exclusive CPUs) should not use exec probes. Use httpGet or tcpSocket probes instead, as exec probes can interfere with latency-sensitive workloads requiring non-interruptible task execution." 
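The `performance-cpu-pinning-no-exec-probes` remediation above names the alternatives explicitly: `httpGet` or `tcpSocket` instead of `exec`. A sketch of a Guaranteed-QoS (pinned-CPU) pod along those lines, which would also carry the readiness and startup probes required by the lifecycle entries earlier in this file (port, paths, and names are assumptions):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: pinned-workload            # illustrative
spec:
  containers:
    - name: app
      image: registry.example.com/dpdk-app:1.0.0
      resources:
        requests:                  # requests == limits with whole CPUs gives
          cpu: "4"                 # Guaranteed QoS and exclusive (pinned) CPUs
          memory: 4Gi
        limits:
          cpu: "4"
          memory: 4Gi
      startupProbe:
        tcpSocket:                 # no exec: nothing is spawned inside the container
          port: 9000
        failureThreshold: 30
        periodSeconds: 10
      readinessProbe:
        httpGet:
          path: /ready
          port: 9000
      livenessProbe:
        httpGet:
          path: /healthz
          port: 9000
```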
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856492551 -0500 CDT m=+20.598504312", + "state": "skipped", + "testID": { + "id": "performance-cpu-pinning-no-exec-probes", + "suite": "performance", + "tags": "telco" + } + }, "networking-dual-stack-service": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-\u0026-ipv6", - "description": "Checks that all services in namespaces under test are either ipv6 single stack or dual stack. This test case requires the deployment of the probe daemonset.", - "exceptionProcess": "No exception needed for optional/extended tests.", - "remediation": "Configure every CNF services with either a single stack ipv6 or dual stack (ipv4/ipv6) load balancer." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856411763 -0500 CDT m=+20.598423524", - "state": "skipped", - "testID": { - "id": "networking-dual-stack-service", - "suite": "networking", - "tags": "extended" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-\u0026-ipv6", + "description": "Checks that all services in namespaces under test are either ipv6 single stack or dual stack. This test case requires the deployment of the probe daemonset.", + "exceptionProcess": "No exception needed for optional/extended tests.", + "remediation": "Configure every CNF services with either a single stack ipv6 or dual stack (ipv4/ipv6) load balancer." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856411763 -0500 CDT m=+20.598423524", + "state": "skipped", + "testID": { + "id": "networking-dual-stack-service", + "suite": "networking", + "tags": "extended" + } + }, "networking-icmpv4-connectivity": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-\u0026-ipv6", - "description": "Checks that each CNF Container is able to communicate via ICMPv4 on the Default OpenShift network. This test case requires the Deployment of the probe daemonset.", - "exceptionProcess": "No exceptions - must be able to communicate on default network using IPv4", - "remediation": "Ensure that the CNF is able to communicate via the Default OpenShift network. In some rare cases, CNFs may require routing table changes in order to communicate over the Default network. To exclude a particular pod from ICMPv4 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence." 
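For the `networking-dual-stack-service` entry above, the usual knobs are `ipFamilyPolicy` and `ipFamilies` on the Service. A hedged sketch, assuming the cluster itself has dual-stack networking enabled (names are illustrative); a single-stack IPv6 Service would also pass:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: example-cnf                # illustrative
spec:
  ipFamilyPolicy: RequireDualStack # dual stack (ipv4/ipv6), as the check accepts
  ipFamilies:
    - IPv6
    - IPv4
  selector:
    app: example-cnf
  ports:
    - name: tcp-web                # port name in <protocol>[-<suffix>] form
      protocol: TCP
      port: 80
      targetPort: 8080
```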
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856305157 -0500 CDT m=+20.598316941", - "state": "skipped", - "testID": { - "id": "networking-icmpv4-connectivity", - "suite": "networking", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-\u0026-ipv6", + "description": "Checks that each CNF Container is able to communicate via ICMPv4 on the Default OpenShift network. This test case requires the Deployment of the probe daemonset.", + "exceptionProcess": "No exceptions - must be able to communicate on default network using IPv4", + "remediation": "Ensure that the CNF is able to communicate via the Default OpenShift network. In some rare cases, CNFs may require routing table changes in order to communicate over the Default network. To exclude a particular pod from ICMPv4 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence." }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" + }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856305157 -0500 CDT m=+20.598316941", + "state": "skipped", + "testID": { + "id": "networking-icmpv4-connectivity", + "suite": "networking", + "tags": "common" + } + }, "networking-icmpv4-connectivity-multus": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", - "description": "Checks that each CNF Container is able to communicate via ICMPv4 on the Multus network(s). This test case requires the Deployment of the probe daemonset.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Ensure that the CNF is able to communicate via the Multus network(s). In some rare cases, CNFs may require routing table changes in order to communicate over the Multus network(s). To exclude a particular pod from ICMPv4 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. Not applicable if MULTUS is not supported." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.85632423 -0500 CDT m=+20.598336005", - "state": "skipped", - "testID": { - "id": "networking-icmpv4-connectivity-multus", - "suite": "networking", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", + "description": "Checks that each CNF Container is able to communicate via ICMPv4 on the Multus network(s). 
This test case requires the Deployment of the probe daemonset.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Ensure that the CNF is able to communicate via the Multus network(s). In some rare cases, CNFs may require routing table changes in order to communicate over the Multus network(s). To exclude a particular pod from ICMPv4 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. Not applicable if MULTUS is not supported." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.85632423 -0500 CDT m=+20.598336005", + "state": "skipped", + "testID": { + "id": "networking-icmpv4-connectivity-multus", + "suite": "networking", + "tags": "telco" + } + }, "networking-icmpv6-connectivity": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-\u0026-ipv6", - "description": "Checks that each CNF Container is able to communicate via ICMPv6 on the Default OpenShift network. This test case requires the Deployment of the probe daemonset.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Ensure that the CNF is able to communicate via the Default OpenShift network. In some rare cases, CNFs may require routing table changes in order to communicate over the Default network. To exclude a particular pod from ICMPv6 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. Not applicable if IPv6 is not supported." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856343657 -0500 CDT m=+20.598355431", - "state": "skipped", - "testID": { - "id": "networking-icmpv6-connectivity", - "suite": "networking", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ipv4-\u0026-ipv6", + "description": "Checks that each CNF Container is able to communicate via ICMPv6 on the Default OpenShift network. This test case requires the Deployment of the probe daemonset.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Ensure that the CNF is able to communicate via the Default OpenShift network. In some rare cases, CNFs may require routing table changes in order to communicate over the Default network. To exclude a particular pod from ICMPv6 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it. The label value is trivial, only its presence. Not applicable if IPv6 is not supported." 
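The remediation above spells out the opt-out mechanism for the ICMP connectivity tests: a label whose presence alone is checked. Applied to a pod, it would look like this sketch (pod name, value, and image are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: no-icmp-pod                # illustrative
  labels:
    # Only the presence of this label matters; the value is not inspected.
    redhat-best-practices-for-k8s.com/skip_connectivity_tests: "true"
spec:
  containers:
    - name: app
      image: registry.example.com/app:1.0.0
```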
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856343657 -0500 CDT m=+20.598355431", + "state": "skipped", + "testID": { + "id": "networking-icmpv6-connectivity", + "suite": "networking", + "tags": "common" + } + }, "networking-icmpv6-connectivity-multus": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", - "description": "Checks that each CNF Container is able to communicate via ICMPv6 on the Multus network(s). This test case requires the Deployment of the probe daemonset.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Ensure that the CNF is able to communicate via the Multus network(s). In some rare cases, CNFs may require routing table changes in order to communicate over the Multus network(s). To exclude a particular pod from ICMPv6 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it.The label value is trivial, only its presence. Not applicable if IPv6/MULTUS is not supported." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856361768 -0500 CDT m=+20.598373549", - "state": "skipped", - "testID": { - "id": "networking-icmpv6-connectivity-multus", - "suite": "networking", - "tags": "telco" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", + "description": "Checks that each CNF Container is able to communicate via ICMPv6 on the Multus network(s). This test case requires the Deployment of the probe daemonset.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Ensure that the CNF is able to communicate via the Multus network(s). In some rare cases, CNFs may require routing table changes in order to communicate over the Multus network(s). To exclude a particular pod from ICMPv6 connectivity tests, add the redhat-best-practices-for-k8s.com/skip_connectivity_tests label to it.The label value is trivial, only its presence. Not applicable if IPv6/MULTUS is not supported." 
}, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" + }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856361768 -0500 CDT m=+20.598373549", + "state": "skipped", + "testID": { + "id": "networking-icmpv6-connectivity-multus", + "suite": "networking", + "tags": "telco" + } + }, "networking-network-policy-deny-all": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-vrfs-aka-routing-instances", - "description": "Check that network policies attached to namespaces running CNF pods contain a default deny-all rule for both ingress and egress traffic", - "exceptionProcess": "No exception needed for optional/extended tests.", - "remediation": "Ensure that a NetworkPolicy with a default deny-all is applied. After the default is applied, apply a network policy to allow the traffic your application requires." - }, - "categoryClassification": { - "Extended": "Optional", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856436427 -0500 CDT m=+20.598448217", - "state": "skipped", - "testID": { - "id": "networking-network-policy-deny-all", - "suite": "networking", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-vrfs-aka-routing-instances", + "description": "Check that network policies attached to namespaces running CNF pods contain a default deny-all rule for both ingress and egress traffic", + "exceptionProcess": "No exception needed for optional/extended tests.", + "remediation": "Ensure that a NetworkPolicy with a default deny-all is applied. After the default is applied, apply a network policy to allow the traffic your application requires." + }, + "categoryClassification": { + "Extended": "Optional", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856436427 -0500 CDT m=+20.598448217", + "state": "skipped", + "testID": { + "id": "networking-network-policy-deny-all", + "suite": "networking", + "tags": "common" + } + }, "networking-ocp-reserved-ports-usage": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ports-reserved-by-openshift", - "description": "Check that containers do not listen on ports that are reserved by OpenShift", - "exceptionProcess": "No exceptions", - "remediation": "Ensure that CNF apps do not listen on ports that are reserved by OpenShift. The following ports are reserved by OpenShift and must NOT be used by any application: 22623, 22624." 
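A default deny-all NetworkPolicy of the kind the `networking-network-policy-deny-all` entry above asks for can be written as follows; after applying it, separate policies must re-allow the traffic the application actually needs (the namespace name is illustrative):

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: example-cnf-ns        # illustrative namespace
spec:
  podSelector: {}                  # selects every pod in the namespace
  policyTypes:                     # listing both types with no rules
    - Ingress                     # denies all ingress and egress traffic
    - Egress
```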
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.85639568 -0500 CDT m=+20.598407452", - "state": "skipped", - "testID": { - "id": "networking-ocp-reserved-ports-usage", - "suite": "networking", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-ports-reserved-by-openshift", + "description": "Check that containers do not listen on ports that are reserved by OpenShift", + "exceptionProcess": "No exceptions", + "remediation": "Ensure that CNF apps do not listen on ports that are reserved by OpenShift. The following ports are reserved by OpenShift and must NOT be used by any application: 22623, 22624." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.85639568 -0500 CDT m=+20.598407452", + "state": "skipped", + "testID": { + "id": "networking-ocp-reserved-ports-usage", + "suite": "networking", + "tags": "common" + } + }, "networking-reserved-partner-ports": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "No Doc Link - Extended", - "description": "Checks that pods and containers are not consuming ports designated as reserved by partner", - "exceptionProcess": "No exception needed for optional/extended tests.", - "remediation": "Ensure ports are not being used that are reserved by our partner" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.8564744 -0500 CDT m=+20.598486190", - "state": "skipped", - "testID": { - "id": "networking-reserved-partner-ports", - "suite": "networking", - "tags": "extended" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "No Doc Link - Extended", + "description": "Checks that pods and containers are not consuming ports designated as reserved by partner", + "exceptionProcess": "No exception needed for optional/extended tests.", + "remediation": "Ensure ports are not being used that are reserved by our partner" }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" + }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.8564744 -0500 CDT m=+20.598486190", + "state": "skipped", + "testID": { + "id": "networking-reserved-partner-ports", + "suite": "networking", + "tags": "extended" + } + }, "networking-restart-on-reboot-sriov-pod": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "No Doc Link - Far Edge", - "description": "Ensures that the label restart-on-reboot exists on pods that use SRIOV network interfaces.", - "exceptionProcess": "There is no documented exception process for this.", 
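The `networking-restart-on-reboot-sriov-pod` entry in this block only requires that the `restart-on-reboot` label exists on pods using SR-IOV interfaces; the label value shown here is an assumption, as is the Multus network attachment annotation:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: sriov-pod                  # illustrative
  labels:
    restart-on-reboot: "true"      # assumed value; the catalog only requires the label
  annotations:
    k8s.v1.cni.cncf.io/networks: sriov-net   # illustrative SR-IOV/Multus attachment
spec:
  containers:
    - name: app
      image: registry.example.com/app:1.0.0
```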
- "remediation": "Ensure that the label restart-on-reboot exists on pods that use SRIOV network interfaces." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856510594 -0500 CDT m=+20.598522378", - "state": "skipped", - "testID": { - "id": "networking-restart-on-reboot-sriov-pod", - "suite": "networking", - "tags": "faredge" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "No Doc Link - Far Edge", + "description": "Ensures that the label restart-on-reboot exists on pods that use SRIOV network interfaces.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Ensure that the label restart-on-reboot exists on pods that use SRIOV network interfaces." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856510594 -0500 CDT m=+20.598522378", + "state": "skipped", + "testID": { + "id": "networking-restart-on-reboot-sriov-pod", + "suite": "networking", + "tags": "faredge" + } + }, "networking-undeclared-container-ports-usage": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs", - "description": "Check that containers do not listen on ports that weren't declared in their specification. Platforms may be configured to block undeclared ports.", - "exceptionProcess": "No exception needed for optional/extended tests.", - "remediation": "Ensure the CNF apps do not listen on undeclared containers' ports." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856378233 -0500 CDT m=+20.598389993", - "state": "skipped", - "testID": { - "id": "networking-undeclared-container-ports-usage", - "suite": "networking", - "tags": "extended" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-requirements-cnf-reqs", + "description": "Check that containers do not listen on ports that weren't declared in their specification. Platforms may be configured to block undeclared ports.", + "exceptionProcess": "No exception needed for optional/extended tests.", + "remediation": "Ensure the CNF apps do not listen on undeclared containers' ports." 
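Taken together, the `networking-ocp-reserved-ports-usage` and `networking-undeclared-container-ports-usage` entries above amount to: declare every port the process listens on in the pod spec, and keep away from 22623 and 22624. A sketch that also follows the `<protocol>[-<suffix>]` naming convention from the manageability suite (name and port are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: declared-ports-pod         # illustrative
spec:
  containers:
    - name: app
      image: registry.example.com/app:1.0.0
      ports:
        - name: tcp-web            # <protocol>[-<suffix>] naming convention
          containerPort: 8080      # declare every listening port; never 22623/22624
          protocol: TCP
```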
}, - "observability-container-logging": - { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Found log line to stderr/stdout\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Found log line to stderr/stdout\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Found log line to stderr/stdout\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Found log line to stderr/stdout\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Found log line to stderr/stdout\",\"certsuite\",\"xdp\",\"xdp-c\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-logging", - "description": "Check that all containers under test use standard input output and standard error when logging. A container must provide APIs for the platform to observe the container health and act accordingly. These APIs include health checks (liveness and readiness), logging to stderr and stdout for log aggregation (by tools such as Logstash or Filebeat), and integrate with tracing and metrics-gathering libraries (such as Prometheus or Metricbeat).", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Ensure containers are not redirecting stdout/stderr" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 569072773, - "endTime": "2023-09-04 09:18:08.855265495 -0500 CDT m=+20.597277257", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.286192716 -0500 CDT m=+20.028204484", - "state": "passed", - "testID": { - "id": "observability-container-logging", - "suite": "observability", - "tags": "telco" - } + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856378233 -0500 CDT m=+20.598389993", + "state": "skipped", + "testID": { + "id": "networking-undeclared-container-ports-usage", + "suite": "networking", + "tags": "extended" + } + }, + "observability-container-logging": { + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Found log line to stderr/stdout\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Found log line to 
stderr/stdout\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Found log line to stderr/stdout\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Found log line to stderr/stdout\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"Found log line to stderr/stdout\",\"certsuite\",\"xdp\",\"xdp-c\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-logging", + "description": "Check that all containers under test use standard input output and standard error when logging. A container must provide APIs for the platform to observe the container health and act accordingly. These APIs include health checks (liveness and readiness), logging to stderr and stdout for log aggregation (by tools such as Logstash or Filebeat), and integrate with tracing and metrics-gathering libraries (such as Prometheus or Metricbeat).", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Ensure containers are not redirecting stdout/stderr" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" + }, + "duration": 569072773, + "endTime": "2023-09-04 09:18:08.855265495 -0500 CDT m=+20.597277257", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.286192716 -0500 CDT m=+20.028204484", + "state": "passed", + "testID": { + "id": "observability-container-logging", + "suite": "observability", + "tags": "telco" + } + }, "observability-crd-status": { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Custom Resource Definition\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Custom Resource Definition Name\",\"Custom Resource Definition Version\"],\"ObjectFieldsValues\":[\"Crd has a status sub resource set\",\"crdexamples.redhat-best-practices-for-k8s.com\",\"v1\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements", - "description": "Checks that all CRDs have a status sub-resource specification (Spec.versions[].Schema.OpenAPIV3Schema.Properties[“status”]).", - "exceptionProcess": "No exceptions", - "remediation": "Ensure that all the CRDs have a meaningful status specification (Spec.versions[].Schema.OpenAPIV3Schema.Properties[“status”])." 
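The `observability-crd-status` check above looks for a status property in each served CRD version's OpenAPI schema, i.e. the status subresource. A minimal sketch of a conforming CRD (group and kind are invented for illustration):

```yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: crdexamples.example.com    # illustrative group and plural
spec:
  group: example.com
  scope: Namespaced
  names:
    plural: crdexamples
    singular: crdexample
    kind: CrdExample
  versions:
    - name: v1
      served: true
      storage: true
      subresources:
        status: {}                 # enables the /status subresource
      schema:
        openAPIV3Schema:
          type: object
          properties:
            spec:
              type: object
            status:                # the status property the check looks for
              type: object
```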
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 88933, - "endTime": "2023-09-04 09:18:08.8554558 -0500 CDT m=+20.597467570", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.855366878 -0500 CDT m=+20.597378637", - "state": "passed", - "testID": { - "id": "observability-crd-status", - "suite": "observability", - "tags": "common" - } + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Custom Resource Definition\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Custom Resource Definition Name\",\"Custom Resource Definition Version\"],\"ObjectFieldsValues\":[\"Crd has a status sub resource set\",\"crdexamples.redhat-best-practices-for-k8s.com\",\"v1\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements", + "description": "Checks that all CRDs have a status sub-resource specification (Spec.versions[].Schema.OpenAPIV3Schema.Properties[“status”]).", + "exceptionProcess": "No exceptions", + "remediation": "Ensure that all the CRDs have a meaningful status specification (Spec.versions[].Schema.OpenAPIV3Schema.Properties[“status”])." }, - "observability-pod-disruption-budget": - { - "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"StatefulSet\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"StatefulSet\",\"Pod Disruption Budget Reference\"],\"ObjectFieldsValues\":[\"StatefulSet: references PodDisruptionBudget\",\"test\",\"test-pdb-max\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-upgrade-expectations", - "description": "Checks to see if pod disruption budgets have allowed values for minAvailable and maxUnavailable", - "exceptionProcess": "No exceptions", - "remediation": "Ensure minAvailable is not zero and maxUnavailable does not equal the number of pods in the replica" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 111202, - "endTime": "2023-09-04 09:18:08.856076584 -0500 CDT m=+20.598088352", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.855965389 -0500 CDT m=+20.597977150", - "state": "passed", - "testID": { - "id": "observability-pod-disruption-budget", - "suite": "observability", - "tags": "common" - } + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, - "observability-termination-policy": - { - "capturedTestOutput": "FAILURE: [container: xdp-c pod: xdp ns: tnf] does not have a TerminationMessagePolicy: FallbackToLogsOnError\n{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is FallbackToLogsOnError\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is 
FallbackToLogsOnError\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is FallbackToLogsOnError\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is FallbackToLogsOnError\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is not FallbackToLogsOnError\",\"certsuite\",\"xdp\",\"xdp-c\"]}]}\n%!(EXTRA []interface {}=[])", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-exit-status", - "description": "Check that all containers are using terminationMessagePolicy: FallbackToLogsOnError. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Ensure containers are all using FallbackToLogsOnError in terminationMessagePolicy" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Mandatory" - }, - "duration": 313822, - "endTime": "2023-09-04 09:18:08.855819327 -0500 CDT m=+20.597831097", - "failureLineContent": "\t\tfail(string(bytes))", - "failureLocation": "/home/greyerof/github/tnf/pkg/testhelper/testhelper.go:367", - "skipReason": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is FallbackToLogsOnError\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is FallbackToLogsOnError\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is FallbackToLogsOnError\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is FallbackToLogsOnError\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is not FallbackToLogsOnError\",\"certsuite\",\"xdp\",\"xdp-c\"]}]}", - 
"startTime": "2023-09-04 09:18:08.855505505 -0500 CDT m=+20.597517275", - "state": "failed", - "testID": { - "id": "observability-termination-policy", - "suite": "observability", - "tags": "telco" - } + "duration": 88933, + "endTime": "2023-09-04 09:18:08.8554558 -0500 CDT m=+20.597467570", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.855366878 -0500 CDT m=+20.597378637", + "state": "passed", + "testID": { + "id": "observability-crd-status", + "suite": "observability", + "tags": "common" + } + }, + "observability-pod-disruption-budget": { + "capturedTestOutput": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"StatefulSet\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"StatefulSet\",\"Pod Disruption Budget Reference\"],\"ObjectFieldsValues\":[\"StatefulSet: references PodDisruptionBudget\",\"test\",\"test-pdb-max\"]}],\"NonCompliantObjectsOut\":null}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-upgrade-expectations", + "description": "Checks to see if pod disruption budgets have allowed values for minAvailable and maxUnavailable", + "exceptionProcess": "No exceptions", + "remediation": "Ensure minAvailable is not zero and maxUnavailable does not equal the number of pods in the replica" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" + }, + "duration": 111202, + "endTime": "2023-09-04 09:18:08.856076584 -0500 CDT m=+20.598088352", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.855965389 -0500 CDT m=+20.597977150", + "state": "passed", + "testID": { + "id": "observability-pod-disruption-budget", + "suite": "observability", + "tags": "common" + } + }, + "observability-termination-policy": { + "capturedTestOutput": "FAILURE: [container: xdp-c pod: xdp ns: tnf] does not have a TerminationMessagePolicy: FallbackToLogsOnError\n{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is FallbackToLogsOnError\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is FallbackToLogsOnError\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is FallbackToLogsOnError\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is FallbackToLogsOnError\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is not FallbackToLogsOnError\",\"certsuite\",\"xdp\",\"xdp-c\"]}]}\n%!(EXTRA []interface {}=[])", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-exit-status", + "description": 
"Check that all containers are using terminationMessagePolicy: FallbackToLogsOnError. There are different ways a pod can stop on an OpenShift cluster. One way is that the pod can remain alive but non-functional. Another way is that the pod can crash and become non-functional. In the first case, if the administrator has implemented liveness and readiness checks, OpenShift can stop the pod and either restart it on the same node or a different node in the cluster. For the second case, when the application in the pod stops, it should exit with a code and write suitable log entries to help the administrator diagnose what the issue was that caused the problem.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Ensure containers are all using FallbackToLogsOnError in terminationMessagePolicy" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Mandatory" }, + "duration": 313822, + "endTime": "2023-09-04 09:18:08.855819327 -0500 CDT m=+20.597831097", + "failureLineContent": "\t\tfail(string(bytes))", + "failureLocation": "/home/greyerof/github/tnf/pkg/testhelper/testhelper.go:367", + "skipReason": "{\"CompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is FallbackToLogsOnError\",\"certsuite\",\"test-0\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is FallbackToLogsOnError\",\"certsuite\",\"test-1\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is FallbackToLogsOnError\",\"certsuite\",\"test-765d6b8dcf-gbvsd\",\"test\"]},{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is FallbackToLogsOnError\",\"certsuite\",\"test-765d6b8dcf-s768n\",\"test\"]}],\"NonCompliantObjectsOut\":[{\"ObjectType\":\"Container\",\"ObjectFieldsKeys\":[\"Reason For Non Compliance\",\"Namespace\",\"Pod Name\",\"Container Name\"],\"ObjectFieldsValues\":[\"TerminationMessagePolicy is not FallbackToLogsOnError\",\"certsuite\",\"xdp\",\"xdp-c\"]}]}", + "startTime": "2023-09-04 09:18:08.855505505 -0500 CDT m=+20.597517275", + "state": "failed", + "testID": { + "id": "observability-termination-policy", + "suite": "observability", + "tags": "telco" + } + }, "operator-install-source": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements", - "description": "Tests whether a CNF Operator is installed via OLM.", - "exceptionProcess": "No exceptions", - "remediation": "Ensure that your Operator is installed via OLM." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856175627 -0500 CDT m=+20.598187408", - "state": "skipped", - "testID": { - "id": "operator-install-source", - "suite": "operator", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements", + "description": "Tests whether a CNF Operator is installed via OLM.", + "exceptionProcess": "No exceptions", + "remediation": "Ensure that your Operator is installed via OLM." }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" + }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856175627 -0500 CDT m=+20.598187408", + "state": "skipped", + "testID": { + "id": "operator-install-source", + "suite": "operator", + "tags": "common" + } + }, "operator-install-status-no-privileges": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements", - "description": "The operator is not installed with privileged rights. Test passes if clusterPermissions is not present in the CSV manifest or is present with no resourceNames under its rules.", - "exceptionProcess": "No exceptions", - "remediation": "Ensure all the CNF operators have no privileges on cluster resources." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856145685 -0500 CDT m=+20.598157446", - "state": "skipped", - "testID": { - "id": "operator-install-status-no-privileges", - "suite": "operator", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements", + "description": "The operator is not installed with privileged rights. Test passes if clusterPermissions is not present in the CSV manifest or is present with no resourceNames under its rules.", + "exceptionProcess": "No exceptions", + "remediation": "Ensure all the CNF operators have no privileges on cluster resources." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856145685 -0500 CDT m=+20.598157446", + "state": "skipped", + "testID": { + "id": "operator-install-status-no-privileges", + "suite": "operator", + "tags": "common" + } + }, "operator-install-status-succeeded": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements", - "description": "Ensures that the target CNF operators report \"Succeeded\" as their installation status.", - "exceptionProcess": "No exceptions", - "remediation": "Ensure all the CNF operators have been successfully installed by OLM." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856116185 -0500 CDT m=+20.598127973", - "state": "skipped", - "testID": { - "id": "operator-install-status-succeeded", - "suite": "operator", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements", + "description": "Ensures that the target CNF operators report \"Succeeded\" as their installation status.", + "exceptionProcess": "No exceptions", + "remediation": "Ensure all the CNF operators have been successfully installed by OLM." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856116185 -0500 CDT m=+20.598127973", + "state": "skipped", + "testID": { + "id": "operator-install-status-succeeded", + "suite": "operator", + "tags": "common" + } + }, "performance-exclusive-cpu-pool": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "No Doc Link - Far Edge", - "description": "Ensures that if one container in a Pod selects an exclusive CPU pool the rest select the same type of CPU pool", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Ensure that if one container in a Pod selects an exclusive CPU pool the rest also select this type of CPU pool" - }, - "categoryClassification": { - "Extended": "Optional", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856537245 -0500 CDT m=+20.598549034", - "state": "skipped", - "testID": { - "id": "performance-exclusive-cpu-pool", - "suite": "performance", - "tags": "faredge" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "No Doc Link - Far Edge", + "description": "Ensures that if one container in a Pod selects an exclusive CPU pool the rest select the same type of CPU pool", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Ensure that if one container in a Pod selects an exclusive CPU pool the rest also select this type of CPU pool" }, + "categoryClassification": { + "Extended": "Optional", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Optional" + }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856537245 -0500 CDT m=+20.598549034", + "state": "skipped", + "testID": { + "id": "performance-exclusive-cpu-pool", + "suite": "performance", + "tags": "faredge" + } + }, "performance-exclusive-cpu-pool-rt-scheduling-policy": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "No Doc Link - Far Edge", - "description": "Ensures that if application workload runs in exclusive CPU pool, it chooses RT CPU schedule policy and set the priority less than 10.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Ensure that the workload running in Application exclusive CPU pool can choose RT CPU scheduling policy, but should set priority less than 10" - }, - "categoryClassification": { - "Extended": "Optional", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856591114 -0500 CDT m=+20.598602875", - "state": "skipped", - "testID": { - "id": "performance-exclusive-cpu-pool-rt-scheduling-policy", - "suite": "performance", - "tags": "faredge" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "No Doc Link - Far Edge", + "description": "Ensures that if application 
workload runs in exclusive CPU pool, it chooses RT CPU schedule policy and set the priority less than 10.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Ensure that the workload running in Application exclusive CPU pool can choose RT CPU scheduling policy, but should set priority less than 10" + }, + "categoryClassification": { + "Extended": "Optional", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856591114 -0500 CDT m=+20.598602875", + "state": "skipped", + "testID": { + "id": "performance-exclusive-cpu-pool-rt-scheduling-policy", + "suite": "performance", + "tags": "faredge" + } + }, "performance-isolated-cpu-pool-rt-scheduling-policy": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "No Doc Link - Far Edge", - "description": "Ensures that a workload running in an application-isolated exclusive CPU pool selects a RT CPU scheduling policy", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Ensure that the workload running in an application-isolated exclusive CPU pool selects a RT CPU scheduling policy (such as SCHED_FIFO/SCHED_RR) with High priority." - }, - "categoryClassification": { - "Extended": "Optional", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856626041 -0500 CDT m=+20.598637802", - "state": "skipped", - "testID": { - "id": "performance-isolated-cpu-pool-rt-scheduling-policy", - "suite": "performance", - "tags": "faredge" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "No Doc Link - Far Edge", + "description": "Ensures that a workload running in an application-isolated exclusive CPU pool selects a RT CPU scheduling policy", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Ensure that the workload running in an application-isolated exclusive CPU pool selects a RT CPU scheduling policy (such as SCHED_FIFO/SCHED_RR) with High priority." + }, + "categoryClassification": { + "Extended": "Optional", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856626041 -0500 CDT m=+20.598637802", + "state": "skipped", + "testID": { + "id": "performance-isolated-cpu-pool-rt-scheduling-policy", + "suite": "performance", + "tags": "faredge" + } + }, "performance-max-resources-exec-probes": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "No Doc Link - Far Edge", - "description": "Checks that less than 10 exec probes are configured in the cluster for this CNF. Also checks that the periodSeconds parameter for each probe is superior or equal to 10.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Reduce the number of exec probes in the cluster for this CNF to less than 10. Increase the update period of the exec probe to be superior or equal to 10 seconds." 
- }, - "categoryClassification": { - "Extended": "Optional", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856607947 -0500 CDT m=+20.598619718", - "state": "skipped", - "testID": { - "id": "performance-max-resources-exec-probes", - "suite": "performance", - "tags": "faredge" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "No Doc Link - Far Edge", + "description": "Checks that less than 10 exec probes are configured in the cluster for this CNF. Also checks that the periodSeconds parameter for each probe is superior or equal to 10.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Reduce the number of exec probes in the cluster for this CNF to less than 10. Increase the update period of the exec probe to be superior or equal to 10 seconds." }, + "categoryClassification": { + "Extended": "Optional", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" + }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856607947 -0500 CDT m=+20.598619718", + "state": "skipped", + "testID": { + "id": "performance-max-resources-exec-probes", + "suite": "performance", + "tags": "faredge" + } + }, "performance-rt-apps-no-exec-probes": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "No Doc Link - Far Edge", - "description": "Ensures that if one container runs a real time application exec probes are not used", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Ensure that if one container runs a real time application exec probes are not used" - }, - "categoryClassification": { - "Extended": "Optional", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856556517 -0500 CDT m=+20.598568279", - "state": "skipped", - "testID": { - "id": "performance-rt-apps-no-exec-probes", - "suite": "performance", - "tags": "faredge" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "No Doc Link - Far Edge", + "description": "Ensures that if one container runs a real time application exec probes are not used", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Ensure that if one container runs a real time application exec probes are not used" + }, + "categoryClassification": { + "Extended": "Optional", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856556517 -0500 CDT m=+20.598568279", + "state": "skipped", + "testID": { + "id": "performance-rt-apps-no-exec-probes", + "suite": "performance", + "tags": "faredge" + } + }, "performance-shared-cpu-pool-non-rt-scheduling-policy": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "No Doc Link - Far Edge", - "description": "Ensures that if application workload runs in shared CPU pool, it chooses non-RT 
CPU schedule policy to always share the CPU with other applications and kernel threads.", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Ensure that the workload running in Application shared CPU pool should choose non-RT CPU schedule policy, like SCHED _OTHER to always share the CPU with other applications and kernel threads." - }, - "categoryClassification": { - "Extended": "Optional", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.856575373 -0500 CDT m=+20.598587144", - "state": "skipped", - "testID": { - "id": "performance-shared-cpu-pool-non-rt-scheduling-policy", - "suite": "performance", - "tags": "faredge" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "No Doc Link - Far Edge", + "description": "Ensures that if application workload runs in shared CPU pool, it chooses non-RT CPU schedule policy to always share the CPU with other applications and kernel threads.", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Ensure that the workload running in Application shared CPU pool should choose non-RT CPU schedule policy, like SCHED _OTHER to always share the CPU with other applications and kernel threads." + }, + "categoryClassification": { + "Extended": "Optional", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.856575373 -0500 CDT m=+20.598587144", + "state": "skipped", + "testID": { + "id": "performance-shared-cpu-pool-non-rt-scheduling-policy", + "suite": "performance", + "tags": "faredge" + } + }, "platform-alteration-base-image": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-image-standards", - "description": "Ensures that the Container Base Image is not altered post-startup. This test is a heuristic, and ensures that there are no changes to the following directories: 1) /var/lib/rpm 2) /var/lib/dpkg 3) /bin 4) /sbin 5) /lib 6) /lib64 7) /usr/bin 8) /usr/sbin 9) /usr/lib 10) /usr/lib64", - "exceptionProcess": "No exceptions", - "remediation": "Ensure that Container applications do not modify the Container Base Image. In particular, ensure that the following directories are not modified: 1) /var/lib/rpm 2) /var/lib/dpkg 3) /bin 4) /sbin 5) /lib 6) /lib64 7) /usr/bin 8) /usr/sbin 9) /usr/lib 10) /usr/lib64 Ensure that all required binaries are built directly into the container image, and are not installed post startup." 
- }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857281407 -0500 CDT m=+20.599293168", - "state": "skipped", - "testID": { - "id": "platform-alteration-base-image", - "suite": "platform-alteration", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-image-standards", + "description": "Ensures that the Container Base Image is not altered post-startup. This test is a heuristic, and ensures that there are no changes to the following directories: 1) /var/lib/rpm 2) /var/lib/dpkg 3) /bin 4) /sbin 5) /lib 6) /lib64 7) /usr/bin 8) /usr/sbin 9) /usr/lib 10) /usr/lib64", + "exceptionProcess": "No exceptions", + "remediation": "Ensure that Container applications do not modify the Container Base Image. In particular, ensure that the following directories are not modified: 1) /var/lib/rpm 2) /var/lib/dpkg 3) /bin 4) /sbin 5) /lib 6) /lib64 7) /usr/bin 8) /usr/sbin 9) /usr/lib 10) /usr/lib64 Ensure that all required binaries are built directly into the container image, and are not installed post startup." }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" + }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857281407 -0500 CDT m=+20.599293168", + "state": "skipped", + "testID": { + "id": "platform-alteration-base-image", + "suite": "platform-alteration", + "tags": "common" + } + }, "platform-alteration-boot-params": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-host-os", - "description": "Tests that boot parameters are set through the MachineConfigOperator, and not set manually on the Node.", - "exceptionProcess": "No exceptions", - "remediation": "Ensure that boot parameters are set directly through the MachineConfigOperator, or indirectly through the PerformanceAddonOperator. Boot parameters should not be changed directly through the Node, as OpenShift should manage the changes for you." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857374258 -0500 CDT m=+20.599386019", - "state": "skipped", - "testID": { - "id": "platform-alteration-boot-params", - "suite": "platform-alteration", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-host-os", + "description": "Tests that boot parameters are set through the MachineConfigOperator, and not set manually on the Node.", + "exceptionProcess": "No exceptions", + "remediation": "Ensure that boot parameters are set directly through the MachineConfigOperator, or indirectly through the PerformanceAddonOperator. 
Boot parameters should not be changed directly through the Node, as OpenShift should manage the changes for you." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857374258 -0500 CDT m=+20.599386019", + "state": "skipped", + "testID": { + "id": "platform-alteration-boot-params", + "suite": "platform-alteration", + "tags": "common" + } + }, "platform-alteration-hugepages-1g-only": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "No Doc Link - Far Edge", - "description": "Check that pods using hugepages only use 1Gi size", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Modify pod to consume 1Gi hugepages only" - }, - "categoryClassification": { - "Extended": "Optional", - "FarEdge": "Mandatory", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.85747187 -0500 CDT m=+20.599483640", - "state": "skipped", - "testID": { - "id": "platform-alteration-hugepages-1g-only", - "suite": "platform-alteration", - "tags": "faredge" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "No Doc Link - Far Edge", + "description": "Check that pods using hugepages only use 1Gi size", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Modify pod to consume 1Gi hugepages only" + }, + "categoryClassification": { + "Extended": "Optional", + "FarEdge": "Mandatory", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.85747187 -0500 CDT m=+20.599483640", + "state": "skipped", + "testID": { + "id": "platform-alteration-hugepages-1g-only", + "suite": "platform-alteration", + "tags": "faredge" + } + }, "platform-alteration-hugepages-2m-only": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-huge-pages", - "description": "Check that pods using hugepages only use 2Mi size", - "exceptionProcess": "No exception needed for optional/extended tests.", - "remediation": "Modify pod to consume 2Mi hugepages only" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857452903 -0500 CDT m=+20.599464678", - "state": "skipped", - "testID": { - "id": "platform-alteration-hugepages-2m-only", - "suite": "platform-alteration", - "tags": "extended" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-huge-pages", + "description": "Check that pods using hugepages only use 2Mi size", + "exceptionProcess": "No exception needed for optional/extended tests.", + "remediation": "Modify pod to consume 2Mi hugepages only" }, + 
"categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" + }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857452903 -0500 CDT m=+20.599464678", + "state": "skipped", + "testID": { + "id": "platform-alteration-hugepages-2m-only", + "suite": "platform-alteration", + "tags": "extended" + } + }, "platform-alteration-hugepages-config": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-huge-pages", - "description": "Checks to see that HugePage settings have been configured through MachineConfig, and not manually on the underlying Node. This test case applies only to Nodes that are configured with the \"worker\" MachineConfigSet. First, the \"worker\" MachineConfig is polled, and the Hugepage settings are extracted. Next, the underlying Nodes are polled for configured HugePages through inspection of /proc/meminfo. The results are compared, and the test passes only if they are the same.", - "exceptionProcess": "No exceptions", - "remediation": "HugePage settings should be configured either directly through the MachineConfigOperator or indirectly using the PerformanceAddonOperator. This ensures that OpenShift is aware of the special MachineConfig requirements, and can provision your CNF on a Node that is part of the corresponding MachineConfigSet. Avoid making changes directly to an underlying Node, and let OpenShift handle the heavy lifting of configuring advanced settings. This test case applies only to Nodes that are configured with the \"worker\" MachineConfigSet." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857355169 -0500 CDT m=+20.599366940", - "state": "skipped", - "testID": { - "id": "platform-alteration-hugepages-config", - "suite": "platform-alteration", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-huge-pages", + "description": "Checks to see that HugePage settings have been configured through MachineConfig, and not manually on the underlying Node. This test case applies only to Nodes that are configured with the \"worker\" MachineConfigSet. First, the \"worker\" MachineConfig is polled, and the Hugepage settings are extracted. Next, the underlying Nodes are polled for configured HugePages through inspection of /proc/meminfo. The results are compared, and the test passes only if they are the same.", + "exceptionProcess": "No exceptions", + "remediation": "HugePage settings should be configured either directly through the MachineConfigOperator or indirectly using the PerformanceAddonOperator. This ensures that OpenShift is aware of the special MachineConfig requirements, and can provision your CNF on a Node that is part of the corresponding MachineConfigSet. Avoid making changes directly to an underlying Node, and let OpenShift handle the heavy lifting of configuring advanced settings. This test case applies only to Nodes that are configured with the \"worker\" MachineConfigSet." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857355169 -0500 CDT m=+20.599366940", + "state": "skipped", + "testID": { + "id": "platform-alteration-hugepages-config", + "suite": "platform-alteration", + "tags": "common" + } + }, "platform-alteration-hyperthread-enable": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "No Doc Link - Extended", - "description": "Check that baremetal workers have hyperthreading enabled", - "exceptionProcess": "There is no documented exception process for this.", - "remediation": "Check that baremetal workers have hyperthreading enabled" - }, - "categoryClassification": { - "Extended": "Optional", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857264776 -0500 CDT m=+20.599276555", - "state": "skipped", - "testID": { - "id": "platform-alteration-hyperthread-enable", - "suite": "platform-alteration", - "tags": "extended" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "No Doc Link - Extended", + "description": "Check that baremetal workers have hyperthreading enabled", + "exceptionProcess": "There is no documented exception process for this.", + "remediation": "Check that baremetal workers have hyperthreading enabled" + }, + "categoryClassification": { + "Extended": "Optional", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857264776 -0500 CDT m=+20.599276555", + "state": "skipped", + "testID": { + "id": "platform-alteration-hyperthread-enable", + "suite": "platform-alteration", + "tags": "extended" + } + }, "platform-alteration-is-selinux-enforcing": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-security", - "description": "verifies that all openshift platform/cluster nodes have selinux in \"Enforcing\" mode.", - "exceptionProcess": "No exceptions", - "remediation": "Configure selinux and enable enforcing mode." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857336671 -0500 CDT m=+20.599348451", - "state": "skipped", - "testID": { - "id": "platform-alteration-is-selinux-enforcing", - "suite": "platform-alteration", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-pod-security", + "description": "verifies that all openshift platform/cluster nodes have selinux in \"Enforcing\" mode.", + "exceptionProcess": "No exceptions", + "remediation": "Configure selinux and enable enforcing mode." 
}, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" + }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857336671 -0500 CDT m=+20.599348451", + "state": "skipped", + "testID": { + "id": "platform-alteration-is-selinux-enforcing", + "suite": "platform-alteration", + "tags": "common" + } + }, "platform-alteration-isredhat-release": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-base-images", - "description": "verifies if the container base image is redhat.", - "exceptionProcess": "No exceptions", - "remediation": "Build a new container image that is based on UBI (Red Hat Universal Base Image)." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857320657 -0500 CDT m=+20.599332441", - "state": "skipped", - "testID": { - "id": "platform-alteration-isredhat-release", - "suite": "platform-alteration", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-base-images", + "description": "verifies if the container base image is redhat.", + "exceptionProcess": "No exceptions", + "remediation": "Build a new container image that is based on UBI (Red Hat Universal Base Image)." + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857320657 -0500 CDT m=+20.599332441", + "state": "skipped", + "testID": { + "id": "platform-alteration-isredhat-release", + "suite": "platform-alteration", + "tags": "common" + } + }, "platform-alteration-ocp-lifecycle": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-k8s", - "description": "Tests that the running OCP version is not end of life.", - "exceptionProcess": "No exceptions", - "remediation": "Please update your cluster to a version that is generally available." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857421181 -0500 CDT m=+20.599432962", - "state": "skipped", - "testID": { - "id": "platform-alteration-ocp-lifecycle", - "suite": "platform-alteration", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-k8s", + "description": "Tests that the running OCP version is not end of life.", + "exceptionProcess": "No exceptions", + "remediation": "Please update your cluster to a version that is generally available." 
+ }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857421181 -0500 CDT m=+20.599432962", + "state": "skipped", + "testID": { + "id": "platform-alteration-ocp-lifecycle", + "suite": "platform-alteration", + "tags": "common" + } + }, "platform-alteration-ocp-node-os-lifecycle": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-host-os", - "description": "Tests that the nodes running in the cluster have operating systems that are compatible with the deployed version of OpenShift.", - "exceptionProcess": "No exceptions", - "remediation": "Please update your workers to a version that is supported by your version of OpenShift" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857437847 -0500 CDT m=+20.599449631", - "state": "skipped", - "testID": { - "id": "platform-alteration-ocp-node-os-lifecycle", - "suite": "platform-alteration", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-host-os", + "description": "Tests that the nodes running in the cluster have operating systems that are compatible with the deployed version of OpenShift.", + "exceptionProcess": "No exceptions", + "remediation": "Please update your workers to a version that is supported by your version of OpenShift" }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" + }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857437847 -0500 CDT m=+20.599449631", + "state": "skipped", + "testID": { + "id": "platform-alteration-ocp-node-os-lifecycle", + "suite": "platform-alteration", + "tags": "common" + } + }, "platform-alteration-service-mesh-usage": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "No Doc Link - Extended", - "description": "Checks if the istio namespace (\"istio-system\") is present. If it is present, checks that the istio sidecar is present in all pods under test.", - "exceptionProcess": "No exception needed for optional/extended tests.", - "remediation": "Ensure all the CNF pods are using service mesh if the cluster provides it." 
- }, - "categoryClassification": { - "Extended": "Optional", - "FarEdge": "Optional", - "NonTelco": "Optional", - "Telco": "Optional" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857405894 -0500 CDT m=+20.599417668", - "state": "skipped", - "testID": { - "id": "platform-alteration-service-mesh-usage", - "suite": "platform-alteration", - "tags": "extended" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "No Doc Link - Extended", + "description": "Checks if the istio namespace (\"istio-system\") is present. If it is present, checks that the istio sidecar is present in all pods under test.", + "exceptionProcess": "No exception needed for optional/extended tests.", + "remediation": "Ensure all the CNF pods are using service mesh if the cluster provides it." + }, + "categoryClassification": { + "Extended": "Optional", + "FarEdge": "Optional", + "NonTelco": "Optional", + "Telco": "Optional" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857405894 -0500 CDT m=+20.599417668", + "state": "skipped", + "testID": { + "id": "platform-alteration-service-mesh-usage", + "suite": "platform-alteration", + "tags": "extended" + } + }, "platform-alteration-sysctl-config": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", - "description": "Tests that no one has changed the node's sysctl configs after the node was created, the tests works by checking if the sysctl configs are consistent with the MachineConfig CR which defines how the node should be configured", - "exceptionProcess": "No exceptions", - "remediation": "You should recreate the node or change the sysctls, recreating is recommended because there might be other unknown changes" - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857389766 -0500 CDT m=+20.599401540", - "state": "skipped", - "testID": { - "id": "platform-alteration-sysctl-config", - "suite": "platform-alteration", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-security", + "description": "Tests that no one has changed the node's sysctl configs after the node was created, the tests works by checking if the sysctl configs are consistent with the MachineConfig CR which defines how the node should be configured", + "exceptionProcess": "No exceptions", + "remediation": "You should recreate the node or change the sysctls, recreating is recommended because there might be other unknown changes" + }, + "categoryClassification": { + "Extended": "Mandatory", + "FarEdge": "Mandatory", + "NonTelco": "Mandatory", + "Telco": "Mandatory" }, + "duration": 0, + "endTime": "0001-01-01 00:00:00 +0000 UTC", + "failureLineContent": "", + "failureLocation": ":0", + "skipReason": "", + "startTime": "2023-09-04 09:18:08.857389766 -0500 CDT m=+20.599401540", + "state": "skipped", + "testID": { + "id": 
"platform-alteration-sysctl-config", + "suite": "platform-alteration", + "tags": "common" + } + }, "platform-alteration-tainted-node-kernel": { - "capturedTestOutput": "", - "catalogInfo": { - "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", - "description": "Ensures that the Node(s) hosting CNFs do not utilize tainted kernels. This test case is especially important to support Highly Available CNFs, since when a CNF is re-instantiated on a backup Node, that Node's kernel may not have the same hacks.'", - "exceptionProcess": "If taint is necessary, document details of the taint and why it's needed by workload or environment.", - "remediation": "Test failure indicates that the underlying Node's kernel is tainted. Ensure that you have not altered underlying Node(s) kernels in order to run the CNF." - }, - "categoryClassification": { - "Extended": "Mandatory", - "FarEdge": "Mandatory", - "NonTelco": "Mandatory", - "Telco": "Mandatory" - }, - "duration": 0, - "endTime": "0001-01-01 00:00:00 +0000 UTC", - "failureLineContent": "", - "failureLocation": ":0", - "skipReason": "", - "startTime": "2023-09-04 09:18:08.857303112 -0500 CDT m=+20.599314872", - "state": "skipped", - "testID": { - "id": "platform-alteration-tainted-node-kernel", - "suite": "platform-alteration", - "tags": "common" - } + "capturedTestOutput": "", + "catalogInfo": { + "bestPracticeReference": "https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-high-level-cnf-expectations", + "description": "Ensures that the Node(s) hosting CNFs do not utilize tainted kernels. This test case is especially important to support Highly Available CNFs, since when a CNF is re-instantiated on a backup Node, that Node's kernel may not have the same hacks.'", + "exceptionProcess": "If taint is necessary, document details of the taint and why it's needed by workload or environment.", + "remediation": "Test failure indicates that the underlying Node's kernel is tainted. Ensure that you have not altered underlying Node(s) kernels in order to run the CNF." 
+        },
+        "categoryClassification": {
+          "Extended": "Mandatory",
+          "FarEdge": "Mandatory",
+          "NonTelco": "Mandatory",
+          "Telco": "Mandatory"
+        },
+        "duration": 0,
+        "endTime": "0001-01-01 00:00:00 +0000 UTC",
+        "failureLineContent": "",
+        "failureLocation": ":0",
+        "skipReason": "",
+        "startTime": "2023-09-04 09:18:08.857303112 -0500 CDT m=+20.599314872",
+        "state": "skipped",
+        "testID": {
+          "id": "platform-alteration-tainted-node-kernel",
+          "suite": "platform-alteration",
+          "tags": "common"
         }
+      }
     },
     "versions": {
       "claimFormat": "v0.1.0",
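Every per-test entry in the claim excerpt above shares the same shape, which makes the results straightforward to post-process. Below is a minimal sketch of decoding that map with Go's standard library; the struct models only the fields visible above, and the `results.json` path is a hypothetical extract of the claim's results section, not a file this change adds.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// TestResult mirrors the per-test fields visible in the claim excerpt above.
// Only a subset of fields is modeled; names match the JSON keys shown.
type TestResult struct {
	State      string `json:"state"`
	SkipReason string `json:"skipReason"`
	Duration   int64  `json:"duration"`
	TestID     struct {
		ID    string `json:"id"`
		Suite string `json:"suite"`
		Tags  string `json:"tags"`
	} `json:"testID"`
	CategoryClassification map[string]string `json:"categoryClassification"`
}

func main() {
	// "results.json" is a hypothetical extract of the claim's results map.
	data, err := os.ReadFile("results.json")
	if err != nil {
		panic(err)
	}
	var results map[string]TestResult
	if err := json.Unmarshal(data, &results); err != nil {
		panic(err)
	}
	for name, r := range results {
		fmt.Printf("%s [%s]: %s\n", name, r.TestID.Suite, r.State)
	}
}
```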
diff --git a/expected_results.yaml b/expected_results.yaml
index 4bce1fe8f..e1311302f 100644
--- a/expected_results.yaml
+++ b/expected_results.yaml
@@ -70,7 +70,7 @@ testCases:
     - platform-alteration-isredhat-release
     - platform-alteration-tainted-node-kernel
   fail:
-    - affiliated-certification-container-is-certified-digest # test container image is not certified
+    - affiliated-certification-container-is-certified-digest # test container image is not certified
   skip:
     - access-control-sys-ptrace-capability
     - access-control-sys-nice-realtime-capability
@@ -80,7 +80,6 @@ testCases:
     - lifecycle-cpu-isolation
     - lifecycle-statefulset-scaling
     - lifecycle-storage-provisioner
-    - networking-dpdk-cpu-pinning-exec-probe
     - networking-icmpv6-connectivity
     - networking-restart-on-reboot-sriov-pod
     - networking-network-attachment-definition-sriov-mtu
@@ -94,6 +93,7 @@ testCases:
     - operator-pods-no-hugepages
     - operator-multiple-same-operators
     - operator-catalogsource-bundle-count
+    - performance-cpu-pinning-no-exec-probes
     - performance-exclusive-cpu-pool-rt-scheduling-policy
    - performance-isolated-cpu-pool-rt-scheduling-policy
    - performance-shared-cpu-pool-non-rt-scheduling-policy
diff --git a/pkg/provider/filters.go b/pkg/provider/filters.go
index 4d53999d5..0fdc53bf5 100644
--- a/pkg/provider/filters.go
+++ b/pkg/provider/filters.go
@@ -18,10 +18,6 @@ package provider

 import (
 	"fmt"
-	"strings"
-
-	"github.com/redhat-best-practices-for-k8s/certsuite/internal/clientsholder"
-	"github.com/redhat-best-practices-for-k8s/certsuite/internal/log"
 )

 // GetGuaranteedPodsWithExclusiveCPUs returns a slice of Pod objects that are guaranteed to have exclusive CPUs.
@@ -113,11 +109,6 @@ func (env *TestEnvironment) GetHugepagesPods() []*Pod {
 	return filteredPods
 }

-// GetCPUPinningPodsWithDpdk returns a slice of Pods that have CPU pinning enabled with DPDK.
-func (env *TestEnvironment) GetCPUPinningPodsWithDpdk() []*Pod {
-	return filterDPDKRunningPods(env.GetGuaranteedPodsWithExclusiveCPUs())
-}
-
 func filterPodsWithoutHostPID(pods []*Pod) []*Pod {
 	var withoutHostPIDPods []*Pod
@@ -130,31 +121,6 @@ func filterPodsWithoutHostPID(pods []*Pod) []*Pod {
 	return withoutHostPIDPods
 }

-func filterDPDKRunningPods(pods []*Pod) []*Pod {
-	var filteredPods []*Pod
-	const (
-		dpdkDriver           = "vfio-pci"
-		findDeviceSubCommand = "find /sys -name"
-	)
-	o := clientsholder.GetClientsHolder()
-	for _, pod := range pods {
-		if len(pod.MultusPCIs) == 0 {
-			continue
-		}
-		ctx := clientsholder.NewContext(pod.Namespace, pod.Name, pod.Spec.Containers[0].Name)
-		findCommand := fmt.Sprintf("%s '%s'", findDeviceSubCommand, pod.MultusPCIs[0])
-		outStr, errStr, err := o.ExecCommandContainer(ctx, findCommand)
-		if err != nil || errStr != "" {
-			log.Error("Failed to execute command %s in probe %s, errStr: %s, err: %v", findCommand, pod.String(), errStr, err)
-			continue
-		}
-		if strings.Contains(outStr, dpdkDriver) {
-			filteredPods = append(filteredPods, pod)
-		}
-	}
-	return filteredPods
-}
-
 // GetShareProcessNamespacePods returns a slice of Pod objects that have the ShareProcessNamespace flag set to true.
 // It iterates over the Pods in the TestEnvironment and filters out the ones that do not have the ShareProcessNamespace flag set.
 // The filtered Pods are then returned as a slice.
diff --git a/pkg/testhelper/testhelper.go b/pkg/testhelper/testhelper.go
index 4c20858d7..86d62c48d 100644
--- a/pkg/testhelper/testhelper.go
+++ b/pkg/testhelper/testhelper.go
@@ -446,16 +446,6 @@ func GetDaemonSetFailedToSpawnSkipFn(env *provider.TestEnvironment) func() (bool
 	}
 }

-func GetNoCPUPinningPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {
-	return func() (bool, string) {
-		if len(env.GetCPUPinningPodsWithDpdk()) == 0 {
-			return true, "no CPU pinning pods to check found"
-		}
-
-		return false, ""
-	}
-}
-
 func GetNoSRIOVPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {
 	return func() (bool, string) {
 		pods, err := env.GetPodsUsingSRIOV()
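The two removals above take out the DPDK-specific plumbing: the `vfio-pci` driver probe and the skip function built on `GetCPUPinningPodsWithDpdk`. Under the renamed check, the skip condition presumably reduces to "no pods with exclusive CPUs at all". A minimal sketch of such a skip function, reusing the `GetGuaranteedPodsWithExclusiveCPUs` helper that remains in `filters.go` (the function name here is an assumption, not taken from this patch):

```go
// Hypothetical successor to the removed GetNoCPUPinningPodsSkipFn: with the
// DPDK driver detection gone, the check only needs to know whether any pod
// requests exclusive CPUs. Sketch only; not part of this patch.
func GetNoExclusiveCPUPodsSkipFn(env *provider.TestEnvironment) func() (bool, string) {
	return func() (bool, string) {
		if len(env.GetGuaranteedPodsWithExclusiveCPUs()) == 0 {
			return true, "no pods with exclusive CPUs to check found"
		}
		return false, ""
	}
}
```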
"https://redhat-best-practices-for-k8s.github.io/guide/#k8s-best-practices-cnf-operator-requirements" diff --git a/tests/identifiers/identifiers.go b/tests/identifiers/identifiers.go index d53ef1081..1c28ca7f7 100644 --- a/tests/identifiers/identifiers.go +++ b/tests/identifiers/identifiers.go @@ -89,7 +89,6 @@ var ( TestAffinityRequiredPods claim.Identifier TestContainerPostStartIdentifier claim.Identifier TestContainerPrestopIdentifier claim.Identifier - TestDpdkCPUPinningExecProbe claim.Identifier TestSysAdminIdentifier claim.Identifier TestNetAdminIdentifier claim.Identifier TestNetRawIdentifier claim.Identifier @@ -101,6 +100,7 @@ var ( TestExclusiveCPUPoolSchedulingPolicy claim.Identifier TestIsolatedCPUPoolSchedulingPolicy claim.Identifier TestRtAppNoExecProbes claim.Identifier + TestCPUPinningNoExecProbes claim.Identifier TestRestartOnRebootLabelOnPodsUsingSRIOV claim.Identifier TestSecConNonRootUserIDIdentifier claim.Identifier TestNetworkAttachmentDefinitionSRIOVUsingMTU claim.Identifier @@ -393,22 +393,6 @@ func InitCatalog() map[claim.Identifier]claim.TestCaseDescription { }, TagTelco) - TestDpdkCPUPinningExecProbe = AddCatalogEntry( - "dpdk-cpu-pinning-exec-probe", - common.NetworkingTestKey, - `If a workload is doing CPU pinning, exec probes may not be used.`, - DpdkCPUPinningExecProbeRemediation, - NoDocumentedProcess, - TestDpdkCPUPinningExecProbeDocLink, - true, - map[string]string{ - FarEdge: Mandatory, - Telco: Mandatory, - NonTelco: Optional, - Extended: Mandatory, - }, - TagTelco) - TestNetAdminIdentifier = AddCatalogEntry( "net-admin-capability-check", common.AccessControlTestKey, @@ -569,6 +553,22 @@ func InitCatalog() map[claim.Identifier]claim.TestCaseDescription { }, TagFarEdge) + TestCPUPinningNoExecProbes = AddCatalogEntry( + "cpu-pinning-no-exec-probes", + common.PerformanceTestKey, + `Workloads utilizing CPU pinning (Guaranteed QoS with exclusive CPUs) should not use exec probes. 
Exec probes run a command within the container, which could interfere with latency-sensitive workloads and cause performance degradation.`, + CPUPinningNoExecProbesRemediation, + NoDocumentedProcess, + TestCPUPinningNoExecProbesDocLink, + true, + map[string]string{ + FarEdge: Mandatory, + Telco: Mandatory, + NonTelco: Optional, + Extended: Mandatory, + }, + TagTelco) + TestRestartOnRebootLabelOnPodsUsingSRIOV = AddCatalogEntry( "restart-on-reboot-sriov-pod", common.NetworkingTestKey, diff --git a/tests/identifiers/impact.go b/tests/identifiers/impact.go index d3967400c..1e23451c7 100644 --- a/tests/identifiers/impact.go +++ b/tests/identifiers/impact.go @@ -27,7 +27,7 @@ const ( TestICMPv4ConnectivityIdentifierImpact = `Failure indicates potential network isolation issues that could prevent workload components from communicating, leading to service degradation or complete application failure.` TestNetworkPolicyDenyAllIdentifierImpact = `Without default deny-all network policies, workloads are exposed to lateral movement attacks and unauthorized network access, compromising security posture and potentially enabling data breaches.` TestReservedExtendedPartnerPortsImpact = `Using reserved ports can cause port conflicts with essential platform services, leading to service startup failures and unpredictable application behavior.` - TestDpdkCPUPinningExecProbeImpact = `Exec probes on CPU-pinned DPDK workloads can cause performance degradation, interrupt real-time operations, and potentially crash applications due to resource contention.` + TestCPUPinningNoExecProbesImpact = `Exec probes on workloads with CPU pinning (exclusive CPUs) can cause performance degradation, interrupt latency-sensitive operations, and potentially crash applications due to resource contention. 
Any workload requiring exclusive CPUs inherently needs non-interruptible task execution.` TestRestartOnRebootLabelOnPodsUsingSRIOVImpact = `Without restart-on-reboot labels, SRIOV-enabled pods may fail to recover from a race condition between kubernetes services startup and SR-IOV device plugin configuration on StarlingX AIO systems, causing SR-IOV devices to disappear from running pods when FPGA devices are reset.` TestNetworkAttachmentDefinitionSRIOVUsingMTUImpact = `Incorrect MTU settings can cause packet fragmentation, network performance issues, and connectivity failures in high-performance networking scenarios.` TestLimitedUseOfExecProbesIdentifierImpact = `Excessive exec probes can overwhelm system resources, degrade performance, and interfere with critical application operations in resource-constrained environments.` @@ -169,7 +169,7 @@ var ImpactMap = map[string]string{ "networking-icmpv4-connectivity": TestICMPv4ConnectivityIdentifierImpact, "networking-network-policy-deny-all": TestNetworkPolicyDenyAllIdentifierImpact, "networking-reserved-partner-ports": TestReservedExtendedPartnerPortsImpact, - "networking-dpdk-cpu-pinning-exec-probe": TestDpdkCPUPinningExecProbeImpact, + "performance-cpu-pinning-no-exec-probes": TestCPUPinningNoExecProbesImpact, "networking-restart-on-reboot-sriov-pod": TestRestartOnRebootLabelOnPodsUsingSRIOVImpact, "networking-network-attachment-definition-sriov-mtu": TestNetworkAttachmentDefinitionSRIOVUsingMTUImpact, "performance-max-resources-exec-probes": TestLimitedUseOfExecProbesIdentifierImpact, diff --git a/tests/identifiers/remediation.go b/tests/identifiers/remediation.go index 7f22f6fc4..0237c025d 100644 --- a/tests/identifiers/remediation.go +++ b/tests/identifiers/remediation.go @@ -188,8 +188,6 @@ const ( ContainerPortNameFormatRemediation = `Ensure that the container's ports name follow our partner naming conventions` - DpdkCPUPinningExecProbeRemediation = "If the workload is doing CPU pinning and running a DPDK process do not use exec probes (executing a command within the container) as it may pile up and block the node eventually." - CheckStorageProvisionerRemediation = `Use a non-local storage (e.g. no kubernetes.io/no-provisioner and no topolvm.io provisioners) in multinode clusters. Local storage are recommended for single node clusters only, but a single local provisioner should be installed.` ExclusiveCPUPoolRemediation = `Ensure that if one container in a Pod selects an exclusive CPU pool the rest also select this type of CPU pool` @@ -202,6 +200,8 @@ const ( RtAppNoExecProbesRemediation = `Ensure that if one container runs a real time application exec probes are not used` + CPUPinningNoExecProbesRemediation = `Workloads that use CPU pinning (Guaranteed QoS with exclusive CPUs) should not use exec probes. 
Use httpGet or tcpSocket probes instead, as exec probes can interfere with latency-sensitive workloads requiring non-interruptible task execution.` + SRIOVPodsRestartOnRebootLabelRemediation = `Ensure that the label restart-on-reboot exists on pods that use SRIOV network interfaces.` SRIOVNetworkAttachmentDefinitionMTURemediation = `Ensure that the MTU of the SR-IOV network attachment definition is set explicitly.` diff --git a/tests/networking/suite.go b/tests/networking/suite.go index 6f61cd94d..8f0a72189 100644 --- a/tests/networking/suite.go +++ b/tests/networking/suite.go @@ -133,15 +133,6 @@ func LoadChecks() { return nil })) - // DPDK CPU pinning exec probe test case - checksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestDpdkCPUPinningExecProbe)). - WithSkipCheckFn(testhelper.GetNoCPUPinningPodsSkipFn(&env)). - WithCheckFn(func(c *checksdb.Check) error { - dpdkPods := env.GetCPUPinningPodsWithDpdk() - testExecProbDenyAtCPUPinning(c, dpdkPods) - return nil - })) - // Restart on reboot label test case checksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestRestartOnRebootLabelOnPodsUsingSRIOV)). WithSkipCheckFn(testhelper.GetNoSRIOVPodsSkipFn(&env)). @@ -168,29 +159,6 @@ func LoadChecks() { })) } -func testExecProbDenyAtCPUPinning(check *checksdb.Check, dpdkPods []*provider.Pod) { - var compliantObjects []*testhelper.ReportObject - var nonCompliantObjects []*testhelper.ReportObject - - for _, cpuPinnedPod := range dpdkPods { - execProbeFound := false - for _, cut := range cpuPinnedPod.Containers { - check.LogInfo("Testing Container %q", cut) - if cut.HasExecProbes() { - check.LogError("Container %q defines an exec probe", cut) - nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(cpuPinnedPod.Namespace, cpuPinnedPod.Name, "Exec prob is not allowed", false)) - execProbeFound = true - } - } - - if !execProbeFound { - check.LogInfo("Pod %q does not define any exec probe", cpuPinnedPod) - compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(cpuPinnedPod.Namespace, cpuPinnedPod.Name, "Exec prob is allowed", true)) - } - } - check.SetResult(compliantObjects, nonCompliantObjects) -} - //nolint:funlen func testUndeclaredContainerPortsUsage(check *checksdb.Check, env *provider.TestEnvironment) { var compliantObjects []*testhelper.ReportObject diff --git a/tests/performance/suite.go b/tests/performance/suite.go index 8c5e4876e..66c51723a 100644 --- a/tests/performance/suite.go +++ b/tests/performance/suite.go @@ -126,6 +126,14 @@ func LoadChecks() { testLimitedUseOfExecProbes(c, &env) return nil })) + + checksGroup.Add(checksdb.NewCheck(identifiers.GetTestIDAndLabels(identifiers.TestCPUPinningNoExecProbes)). + WithSkipCheckFn(skipIfNoGuaranteedPodContainersWithExclusiveCPUs). 
+		WithCheckFn(func(c *checksdb.Check) error {
+			cpuPinnedPods := env.GetGuaranteedPodsWithExclusiveCPUs()
+			testCPUPinningNoExecProbes(c, cpuPinnedPods)
+			return nil
+		}))
 }
 
 //nolint:funlen
@@ -383,3 +391,26 @@ func filterProbeProcesses(allProcesses []*crclient.Process, cut *provider.Contai
 	}
 	return notExecProbeProcesses, compliantObjects
 }
+
+func testCPUPinningNoExecProbes(check *checksdb.Check, cpuPinnedPods []*provider.Pod) {
+	var compliantObjects []*testhelper.ReportObject
+	var nonCompliantObjects []*testhelper.ReportObject
+
+	for _, cpuPinnedPod := range cpuPinnedPods {
+		execProbeFound := false
+		for _, cut := range cpuPinnedPod.Containers {
+			check.LogInfo("Testing Container %q", cut)
+			if cut.HasExecProbes() {
+				check.LogError("Container %q defines an exec probe", cut)
+				nonCompliantObjects = append(nonCompliantObjects, testhelper.NewPodReportObject(cpuPinnedPod.Namespace, cpuPinnedPod.Name, "Exec probe is not allowed on CPU-pinned pods", false))
+				execProbeFound = true
+			}
+		}
+
+		if !execProbeFound {
+			check.LogInfo("Pod %q does not define any exec probe", cpuPinnedPod)
+			compliantObjects = append(compliantObjects, testhelper.NewPodReportObject(cpuPinnedPod.Namespace, cpuPinnedPod.Name, "No exec probes found", true))
+		}
+	}
+	check.SetResult(compliantObjects, nonCompliantObjects)
+}
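
The check now targets every pod returned by `GetGuaranteedPodsWithExclusiveCPUs()` rather than only DPDK pods. In Kubernetes, a container receives exclusive CPUs from the kubelet's static CPU manager policy when its pod is Guaranteed QoS (requests equal to limits) and it requests an integer number of CPUs. The following is a minimal sketch of that per-container criterion; it is an assumption about what the provider helper checks, since its implementation is not part of this diff:

```go
package example

import corev1 "k8s.io/api/core/v1"

// hasExclusiveCPUs sketches the condition behind GetGuaranteedPodsWithExclusiveCPUs:
// Guaranteed QoS (requests == limits, both non-zero) plus a whole-number CPU
// request, which is what makes the static CPU manager policy pin the container
// to dedicated CPUs. The real provider implementation may differ.
func hasExclusiveCPUs(c *corev1.Container) bool {
	cpuReq := c.Resources.Requests[corev1.ResourceCPU]
	cpuLim := c.Resources.Limits[corev1.ResourceCPU]
	memReq := c.Resources.Requests[corev1.ResourceMemory]
	memLim := c.Resources.Limits[corev1.ResourceMemory]

	guaranteed := !cpuLim.IsZero() && !memLim.IsZero() &&
		cpuReq.Cmp(cpuLim) == 0 && memReq.Cmp(memLim) == 0
	wholeCPUs := cpuLim.MilliValue()%1000 == 0 // no fractional CPU share
	return guaranteed && wholeCPUs
}
```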
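The new `testCPUPinningNoExecProbes` function relies on the container helper `HasExecProbes()`, whose implementation is not shown in this diff. Based on its usage here, it presumably inspects the three probe slots for an `Exec` handler, roughly as sketched below (plain `corev1` types stand in for the `provider.Container` wrapper):

```go
package example

import corev1 "k8s.io/api/core/v1"

// hasExecProbes reports whether any of the container's liveness, readiness,
// or startup probes uses an Exec handler. Sketch only: the actual
// provider.Container.HasExecProbes implementation is not part of this diff.
func hasExecProbes(c *corev1.Container) bool {
	for _, probe := range []*corev1.Probe{c.LivenessProbe, c.ReadinessProbe, c.StartupProbe} {
		if probe != nil && probe.Exec != nil {
			return true
		}
	}
	return false
}
```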
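The remediation text recommends replacing exec probes with `httpGet` or `tcpSocket` probes on CPU-pinned workloads. A minimal before/after sketch using the `k8s.io/api/core/v1` types, assuming an API version where `Probe` embeds `ProbeHandler` (k8s.io/api v0.22+); the endpoint, port, command, and probe period below are illustrative assumptions, not values mandated by the check:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// nonCompliantProbe shows the pattern the cpu-pinning-no-exec-probes check
// flags: the kubelet forks a process inside the container on every probe
// period, which can preempt threads pinned to exclusive CPUs.
var nonCompliantProbe = corev1.Probe{
	ProbeHandler: corev1.ProbeHandler{
		Exec: &corev1.ExecAction{Command: []string{"/bin/sh", "-c", "pgrep myapp"}},
	},
	PeriodSeconds: 10,
}

// compliantProbe performs the health check over the network instead, so no
// process is started inside the CPU-pinned container.
var compliantProbe = corev1.Probe{
	ProbeHandler: corev1.ProbeHandler{
		HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
	},
	PeriodSeconds: 10,
}
```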